   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/net/sunrpc/xprt.c
   4 *
   5 *  This is a generic RPC call interface supporting congestion avoidance,
   6 *  and asynchronous calls.
   7 *
   8 *  The interface works like this:
   9 *
  10 *  -	When a process places a call, it allocates a request slot if
  11 *	one is available. Otherwise, it sleeps on the backlog queue
  12 *	(xprt_reserve).
  13 *  -	Next, the caller puts together the RPC message, stuffs it into
  14 *	the request struct, and calls xprt_transmit().
  15 *  -	xprt_transmit sends the message and installs the caller on the
  16 *	transport's wait list. At the same time, if a reply is expected,
  17 *	it installs a timer that is run after the packet's timeout has
  18 *	expired.
  19 *  -	When a packet arrives, the data_ready handler walks the list of
  20 *	pending requests for that transport. If a matching XID is found, the
  21 *	caller is woken up, and the timer removed.
  22 *  -	When no reply arrives within the timeout interval, the timer is
  23 *	fired by the kernel and runs xprt_timer(). It either adjusts the
  24 *	timeout values (minor timeout) or wakes up the caller with a status
  25 *	of -ETIMEDOUT.
  26 *  -	When the caller receives a notification from RPC that a reply arrived,
  27 *	it should release the RPC slot, and process the reply.
  28 *	If the call timed out, it may choose to retry the operation by
  29 *	adjusting the initial timeout value, and simply calling rpc_call
  30 *	again.
  31 *
  32 *  Support for async RPC is done through a set of RPC-specific scheduling
  33 *  primitives that `transparently' work for processes as well as async
  34 *  tasks that rely on callbacks.
  35 *
  36 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
  37 *
  38 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
  39 */
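/*
 * Example (illustration only; "task" stands for any rpc_task and error
 * handling is omitted): the lifecycle described above, in terms of the
 * entry points defined in this file:
 *
 *	xprt_reserve(task);			// get a slot or sleep on the backlog
 *	xprt_prepare_transmit(task);		// take the transport write lock
 *	xprt_transmit(task);			// drain the transmit queue
 *	xprt_end_transmit(task);		// drop the write lock
 *	xprt_request_wait_receive(task);	// sleep until reply or timeout
 *	xprt_release(task);			// return the slot when done
 */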
  40
  41#include <linux/module.h>
  42
  43#include <linux/types.h>
  44#include <linux/interrupt.h>
  45#include <linux/workqueue.h>
  46#include <linux/net.h>
  47#include <linux/ktime.h>
  48
  49#include <linux/sunrpc/clnt.h>
  50#include <linux/sunrpc/metrics.h>
  51#include <linux/sunrpc/bc_xprt.h>
  52#include <linux/rcupdate.h>
  53#include <linux/sched/mm.h>
  54
  55#include <trace/events/sunrpc.h>
  56
  57#include "sunrpc.h"
  58
  59/*
  60 * Local variables
  61 */
  62
  63#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  64# define RPCDBG_FACILITY	RPCDBG_XPRT
  65#endif
  66
  67/*
  68 * Local functions
  69 */
  70static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
  71static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
  72static void	 xprt_destroy(struct rpc_xprt *xprt);
  73
  74static DEFINE_SPINLOCK(xprt_list_lock);
  75static LIST_HEAD(xprt_list);
  76
  77static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
  78{
  79	unsigned long timeout = jiffies + req->rq_timeout;
  80
  81	if (time_before(timeout, req->rq_majortimeo))
  82		return timeout;
  83	return req->rq_majortimeo;
  84}
  85
  86/**
  87 * xprt_register_transport - register a transport implementation
  88 * @transport: transport to register
  89 *
  90 * If a transport implementation is loaded as a kernel module, it can
  91 * call this interface to make itself known to the RPC client.
  92 *
  93 * Returns:
  94 * 0:		transport successfully registered
  95 * -EEXIST:	transport already registered
  96 * -EINVAL:	transport module being unloaded
  97 */
  98int xprt_register_transport(struct xprt_class *transport)
  99{
 100	struct xprt_class *t;
 101	int result;
 102
 103	result = -EEXIST;
 104	spin_lock(&xprt_list_lock);
 105	list_for_each_entry(t, &xprt_list, list) {
 106		/* don't register the same transport class twice */
 107		if (t->ident == transport->ident)
 108			goto out;
 109	}
 110
 111	list_add_tail(&transport->list, &xprt_list);
 112	printk(KERN_INFO "RPC: Registered %s transport module.\n",
 113	       transport->name);
 114	result = 0;
 115
 116out:
 117	spin_unlock(&xprt_list_lock);
 118	return result;
 119}
 120EXPORT_SYMBOL_GPL(xprt_register_transport);
 121
 122/**
 123 * xprt_unregister_transport - unregister a transport implementation
 124 * @transport: transport to unregister
 125 *
 126 * Returns:
 127 * 0:		transport successfully unregistered
 128 * -ENOENT:	transport never registered
 129 */
 130int xprt_unregister_transport(struct xprt_class *transport)
 131{
 132	struct xprt_class *t;
 133	int result;
 134
 135	result = 0;
 136	spin_lock(&xprt_list_lock);
 137	list_for_each_entry(t, &xprt_list, list) {
 138		if (t == transport) {
 139			printk(KERN_INFO
 140				"RPC: Unregistered %s transport module.\n",
 141				transport->name);
 142			list_del_init(&transport->list);
 143			goto out;
 144		}
 145	}
 146	result = -ENOENT;
 147
 148out:
 149	spin_unlock(&xprt_list_lock);
 150	return result;
 151}
 152EXPORT_SYMBOL_GPL(xprt_unregister_transport);
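/*
 * Example: a minimal sketch of how a loadable transport module would
 * use the two calls above; the "example_*" names are placeholders and
 * a real module defines its own ident constant:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= 0,			// placeholder ident
 *		.setup	= example_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */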
 153
 154/**
 155 * xprt_load_transport - load a transport implementation
 156 * @transport_name: transport to load
 157 *
 158 * Returns:
 159 * 0:		transport successfully loaded
 160 * -ENOENT:	transport module not available
 161 */
 162int xprt_load_transport(const char *transport_name)
 163{
 164	struct xprt_class *t;
 165	int result;
 166
 167	result = 0;
 168	spin_lock(&xprt_list_lock);
 169	list_for_each_entry(t, &xprt_list, list) {
 170		if (strcmp(t->name, transport_name) == 0) {
 171			spin_unlock(&xprt_list_lock);
 172			goto out;
 173		}
 174	}
 175	spin_unlock(&xprt_list_lock);
 176	result = request_module("xprt%s", transport_name);
 177out:
 178	return result;
 179}
 180EXPORT_SYMBOL_GPL(xprt_load_transport);
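/*
 * Note that the module is requested as "xprt%s", so a transport module
 * that registers under the name "foo" should carry a matching alias for
 * xprt_load_transport("foo") to be able to load it:
 *
 *	MODULE_ALIAS("xprtfoo");
 */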
 181
 182static void xprt_clear_locked(struct rpc_xprt *xprt)
 183{
 184	xprt->snd_task = NULL;
 185	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
 186		smp_mb__before_atomic();
 187		clear_bit(XPRT_LOCKED, &xprt->state);
 188		smp_mb__after_atomic();
 189	} else
 190		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 191}
 192
 193/**
 194 * xprt_reserve_xprt - serialize write access to transports
 195 * @task: task that is requesting access to the transport
 196 * @xprt: pointer to the target transport
 197 *
 198 * This prevents mixing the payload of separate requests, and prevents
 199 * transport connects from colliding with writes.  No congestion control
 200 * is provided.
 201 */
 202int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 203{
 204	struct rpc_rqst *req = task->tk_rqstp;
 205
 206	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 207		if (task == xprt->snd_task)
 208			return 1;
 209		goto out_sleep;
 210	}
 211	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 212		goto out_unlock;
 213	xprt->snd_task = task;
 214
 215	return 1;
 216
 217out_unlock:
 218	xprt_clear_locked(xprt);
 219out_sleep:
 220	dprintk("RPC: %5u failed to lock transport %p\n",
 221			task->tk_pid, xprt);
 222	task->tk_status = -EAGAIN;
 223	if (RPC_IS_SOFT(task))
 224		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
 225				xprt_request_timeout(req));
 226	else
 227		rpc_sleep_on(&xprt->sending, task, NULL);
 228	return 0;
 229}
 230EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
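/*
 * Transports select a locking flavor by pointing their ops table at one
 * of these helpers. A sketch for a stream transport that needs no
 * congestion control (other fields omitted):
 *
 *	static const struct rpc_xprt_ops example_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt,
 *		.release_xprt	= xprt_release_xprt,
 *		...
 *	};
 */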
 231
 232static bool
 233xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
 234{
 235	return test_bit(XPRT_CWND_WAIT, &xprt->state);
 236}
 237
 238static void
 239xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
 240{
 241	if (!list_empty(&xprt->xmit_queue)) {
 242		/* Peek at head of queue to see if it can make progress */
 243		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
 244					rq_xmit)->rq_cong)
 245			return;
 246	}
 247	set_bit(XPRT_CWND_WAIT, &xprt->state);
 248}
 249
 250static void
 251xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
 252{
 253	if (!RPCXPRT_CONGESTED(xprt))
 254		clear_bit(XPRT_CWND_WAIT, &xprt->state);
 255}
 256
 257/*
 258 * xprt_reserve_xprt_cong - serialize write access to transports
 259 * @task: task that is requesting access to the transport
 260 *
 261 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 262 * integrated into the decision of whether a request is allowed to be
 263 * woken up and given access to the transport.
 264 * Note that the lock is only granted if we know there are free slots.
 265 */
 266int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 267{
 268	struct rpc_rqst *req = task->tk_rqstp;
 269
 270	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 271		if (task == xprt->snd_task)
 272			return 1;
 273		goto out_sleep;
 274	}
 275	if (req == NULL) {
 276		xprt->snd_task = task;
 277		return 1;
 278	}
 279	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 280		goto out_unlock;
 281	if (!xprt_need_congestion_window_wait(xprt)) {
 282		xprt->snd_task = task;
 283		return 1;
 284	}
 285out_unlock:
 286	xprt_clear_locked(xprt);
 287out_sleep:
 288	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
 289	task->tk_status = -EAGAIN;
 290	if (RPC_IS_SOFT(task))
 291		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
 292				xprt_request_timeout(req));
 293	else
 294		rpc_sleep_on(&xprt->sending, task, NULL);
 295	return 0;
 296}
 297EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
 298
 299static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 300{
 301	int retval;
 302
 303	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
 304		return 1;
 305	spin_lock(&xprt->transport_lock);
 306	retval = xprt->ops->reserve_xprt(xprt, task);
 307	spin_unlock(&xprt->transport_lock);
 308	return retval;
 309}
 310
 311static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 312{
 313	struct rpc_xprt *xprt = data;
 314
 315	xprt->snd_task = task;
 316	return true;
 317}
 318
 319static void __xprt_lock_write_next(struct rpc_xprt *xprt)
 320{
 321	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 322		return;
 323	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 324		goto out_unlock;
 325	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
 326				__xprt_lock_write_func, xprt))
 327		return;
 328out_unlock:
 329	xprt_clear_locked(xprt);
 330}
 331
 332static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
 333{
 334	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 335		return;
 336	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 337		goto out_unlock;
 338	if (xprt_need_congestion_window_wait(xprt))
 339		goto out_unlock;
 340	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
 341				__xprt_lock_write_func, xprt))
 342		return;
 343out_unlock:
 344	xprt_clear_locked(xprt);
 345}
 346
 347/**
 348 * xprt_release_xprt - allow other requests to use a transport
 349 * @xprt: transport with other tasks potentially waiting
 350 * @task: task that is releasing access to the transport
 351 *
 352 * Note that "task" can be NULL.  No congestion control is provided.
 353 */
 354void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 355{
 356	if (xprt->snd_task == task) {
 357		xprt_clear_locked(xprt);
 358		__xprt_lock_write_next(xprt);
 359	}
 360}
 361EXPORT_SYMBOL_GPL(xprt_release_xprt);
 362
 363/**
 364 * xprt_release_xprt_cong - allow other requests to use a transport
 365 * @xprt: transport with other tasks potentially waiting
 366 * @task: task that is releasing access to the transport
 367 *
 368 * Note that "task" can be NULL.  Another task is awoken to use the
 369 * transport if the transport's congestion window allows it.
 370 */
 371void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 372{
 373	if (xprt->snd_task == task) {
 374		xprt_clear_locked(xprt);
 375		__xprt_lock_write_next_cong(xprt);
 376	}
 377}
 378EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
 379
 380static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 381{
 382	if (xprt->snd_task != task)
 383		return;
 384	spin_lock(&xprt->transport_lock);
 385	xprt->ops->release_xprt(xprt, task);
 386	spin_unlock(&xprt->transport_lock);
 387}
 388
 389/*
 390 * Van Jacobson congestion avoidance. Check if the congestion window
 391 * overflowed. Put the task to sleep if this is the case.
 392 */
 393static int
 394__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 395{
 396	if (req->rq_cong)
 397		return 1;
 398	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
 399			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
 400	if (RPCXPRT_CONGESTED(xprt)) {
 401		xprt_set_congestion_window_wait(xprt);
 402		return 0;
 403	}
 404	req->rq_cong = 1;
 405	xprt->cong += RPC_CWNDSCALE;
 406	return 1;
 407}
 408
 409/*
 410 * Adjust the congestion window, and wake up the next task
 411 * that has been sleeping due to congestion
 412 */
 413static void
 414__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 415{
 416	if (!req->rq_cong)
 417		return;
 418	req->rq_cong = 0;
 419	xprt->cong -= RPC_CWNDSCALE;
 420	xprt_test_and_clear_congestion_window_wait(xprt);
 421	__xprt_lock_write_next_cong(xprt);
 422}
 423
 424/**
 425 * xprt_request_get_cong - Request congestion control credits
 426 * @xprt: pointer to transport
 427 * @req: pointer to RPC request
 428 *
 429 * Useful for transports that require congestion control.
 430 */
 431bool
 432xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 433{
 434	bool ret = false;
 435
 436	if (req->rq_cong)
 437		return true;
 438	spin_lock(&xprt->transport_lock);
 439	ret = __xprt_get_cong(xprt, req) != 0;
 440	spin_unlock(&xprt->transport_lock);
 441	return ret;
 442}
 443EXPORT_SYMBOL_GPL(xprt_request_get_cong);
 444
 445/**
 446 * xprt_release_rqst_cong - housekeeping when request is complete
 447 * @task: RPC request that recently completed
 448 *
 449 * Useful for transports that require congestion control.
 450 */
 451void xprt_release_rqst_cong(struct rpc_task *task)
 452{
 453	struct rpc_rqst *req = task->tk_rqstp;
 454
 455	__xprt_put_cong(req->rq_xprt, req);
 456}
 457EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
 458
 459static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
 460{
 461	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
 462		__xprt_lock_write_next_cong(xprt);
 463}
 464
 465/*
 466 * Clear the congestion window wait flag and wake up the next
 467 * entry on xprt->sending
 468 */
 469static void
 470xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
 471{
 472	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
 473		spin_lock(&xprt->transport_lock);
 474		__xprt_lock_write_next_cong(xprt);
 475		spin_unlock(&xprt->transport_lock);
 476	}
 477}
 478
 479/**
 480 * xprt_adjust_cwnd - adjust transport congestion window
 481 * @xprt: pointer to xprt
 482 * @task: recently completed RPC request used to adjust window
 483 * @result: result code of completed RPC request
 484 *
 485 * The transport code maintains an estimate on the maximum number of out-
 486 * standing RPC requests, using a smoothed version of the congestion
 487 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 488 * congestion algorithm: If a retransmit occurs, the congestion window is
 489 * halved; otherwise, it is incremented by 1/cwnd when
 490 *
 491 *	-	a reply is received and
 492 *	-	a full number of requests are outstanding and
 493 *	-	the congestion window hasn't been updated recently.
 494 */
 495void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
 496{
 497	struct rpc_rqst *req = task->tk_rqstp;
 498	unsigned long cwnd = xprt->cwnd;
 499
 500	if (result >= 0 && cwnd <= xprt->cong) {
 501		/* The (cwnd >> 1) term makes sure
 502		 * the result gets rounded properly. */
 503		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
 504		if (cwnd > RPC_MAXCWND(xprt))
 505			cwnd = RPC_MAXCWND(xprt);
 506		__xprt_lock_write_next_cong(xprt);
 507	} else if (result == -ETIMEDOUT) {
 508		cwnd >>= 1;
 509		if (cwnd < RPC_CWNDSCALE)
 510			cwnd = RPC_CWNDSCALE;
 511	}
 512	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
 513			xprt->cong, xprt->cwnd, cwnd);
 514	xprt->cwnd = cwnd;
 515	__xprt_put_cong(xprt, req);
 516}
 517EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
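/*
 * Worked example, assuming RPC_CWNDSCALE is 256 (one slot's worth of
 * credit): with cwnd = 512 (two slots), a reply on a full window grows
 * cwnd by (256 * 256 + 256) / 512 = 128, i.e. half a slot, while a
 * timeout halves it to 256 and never below RPC_CWNDSCALE.
 */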
 518
 519/**
 520 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 521 * @xprt: transport with waiting tasks
 522 * @status: result code to plant in each task before waking it
 523 *
 524 */
 525void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
 526{
 527	if (status < 0)
 528		rpc_wake_up_status(&xprt->pending, status);
 529	else
 530		rpc_wake_up(&xprt->pending);
 531}
 532EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
 533
 534/**
 535 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 536 * @xprt: transport
 537 *
 538 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 539 * we don't in general want to force a socket disconnection due to
 540 * an incomplete RPC call transmission.
 541 */
 542void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
 543{
 544	set_bit(XPRT_WRITE_SPACE, &xprt->state);
 545}
 546EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
 547
 548static bool
 549xprt_clear_write_space_locked(struct rpc_xprt *xprt)
 550{
 551	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
 552		__xprt_lock_write_next(xprt);
 553		dprintk("RPC:       write space: waking waiting task on "
 554				"xprt %p\n", xprt);
 555		return true;
 556	}
 557	return false;
 558}
 559
 560/**
 561 * xprt_write_space - wake the task waiting for transport output buffer space
 562 * @xprt: transport with waiting tasks
 563 *
 564 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 565 */
 566bool xprt_write_space(struct rpc_xprt *xprt)
 567{
 568	bool ret;
 569
 570	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
 571		return false;
 572	spin_lock(&xprt->transport_lock);
 573	ret = xprt_clear_write_space_locked(xprt);
 574	spin_unlock(&xprt->transport_lock);
 575	return ret;
 576}
 577EXPORT_SYMBOL_GPL(xprt_write_space);
 578
 579static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
 580{
 581	s64 delta = ktime_to_ns(ktime_get() - abstime);
 582	return likely(delta >= 0) ?
 583		jiffies - nsecs_to_jiffies(delta) :
 584		jiffies + nsecs_to_jiffies(-delta);
 585}
 586
 587static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
 588{
 589	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
 590	unsigned long majortimeo = req->rq_timeout;
 591
 592	if (to->to_exponential)
 593		majortimeo <<= to->to_retries;
 594	else
 595		majortimeo += to->to_increment * to->to_retries;
 596	if (majortimeo > to->to_maxval || majortimeo == 0)
 597		majortimeo = to->to_maxval;
 598	return majortimeo;
 599}
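/*
 * Worked example with hypothetical timeout parameters: for to_initval
 * of 5s and to_retries of 3, exponential backoff yields a major timeout
 * of 5s << 3 = 40s, while linear backoff with a to_increment of 5s
 * yields 5s + 5s * 3 = 20s; either result is clamped to to_maxval.
 */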
 600
 601static void xprt_reset_majortimeo(struct rpc_rqst *req)
 602{
 603	req->rq_majortimeo += xprt_calc_majortimeo(req);
 604}
 605
 606static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
 607{
 608	unsigned long time_init;
 609	struct rpc_xprt *xprt = req->rq_xprt;
 610
 611	if (likely(xprt && xprt_connected(xprt)))
 612		time_init = jiffies;
 613	else
 614		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
 615	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
 616	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
 617}
 618
 619/**
 620 * xprt_adjust_timeout - adjust timeout values for next retransmit
 621 * @req: RPC request containing parameters to use for the adjustment
 622 *
 623 */
 624int xprt_adjust_timeout(struct rpc_rqst *req)
 625{
 626	struct rpc_xprt *xprt = req->rq_xprt;
 627	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
 628	int status = 0;
 629
 630	if (time_before(jiffies, req->rq_majortimeo)) {
 631		if (to->to_exponential)
 632			req->rq_timeout <<= 1;
 633		else
 634			req->rq_timeout += to->to_increment;
 635		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
 636			req->rq_timeout = to->to_maxval;
 637		req->rq_retries++;
 638	} else {
 639		req->rq_timeout = to->to_initval;
 640		req->rq_retries = 0;
 641		xprt_reset_majortimeo(req);
 642		/* Reset the RTT counters == "slow start" */
 643		spin_lock(&xprt->transport_lock);
 644		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
 645		spin_unlock(&xprt->transport_lock);
 646		status = -ETIMEDOUT;
 647	}
 648
 649	if (req->rq_timeout == 0) {
 650		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
 651		req->rq_timeout = 5 * HZ;
 652	}
 653	return status;
 654}
 655
 656static void xprt_autoclose(struct work_struct *work)
 657{
 658	struct rpc_xprt *xprt =
 659		container_of(work, struct rpc_xprt, task_cleanup);
 660	unsigned int pflags = memalloc_nofs_save();
 661
 662	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
 663	xprt->ops->close(xprt);
 664	xprt_release_write(xprt, NULL);
 665	wake_up_bit(&xprt->state, XPRT_LOCKED);
 666	memalloc_nofs_restore(pflags);
 667}
 668
 669/**
 670 * xprt_disconnect_done - mark a transport as disconnected
 671 * @xprt: transport to flag for disconnect
 672 *
 673 */
 674void xprt_disconnect_done(struct rpc_xprt *xprt)
 675{
 676	dprintk("RPC:       disconnected transport %p\n", xprt);
 677	spin_lock(&xprt->transport_lock);
 678	xprt_clear_connected(xprt);
 679	xprt_clear_write_space_locked(xprt);
 680	xprt_clear_congestion_window_wait_locked(xprt);
 681	xprt_wake_pending_tasks(xprt, -ENOTCONN);
 682	spin_unlock(&xprt->transport_lock);
 683}
 684EXPORT_SYMBOL_GPL(xprt_disconnect_done);
 685
 686/**
 687 * xprt_force_disconnect - force a transport to disconnect
 688 * @xprt: transport to disconnect
 689 *
 690 */
 691void xprt_force_disconnect(struct rpc_xprt *xprt)
 692{
 693	/* Don't race with the test_bit() in xprt_clear_locked() */
 694	spin_lock(&xprt->transport_lock);
 695	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
 696	/* Try to schedule an autoclose RPC call */
 697	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
 698		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 699	else if (xprt->snd_task)
 700		rpc_wake_up_queued_task_set_status(&xprt->pending,
 701				xprt->snd_task, -ENOTCONN);
 702	spin_unlock(&xprt->transport_lock);
 703}
 704EXPORT_SYMBOL_GPL(xprt_force_disconnect);
 705
 706static unsigned int
 707xprt_connect_cookie(struct rpc_xprt *xprt)
 708{
 709	return READ_ONCE(xprt->connect_cookie);
 710}
 711
 712static bool
 713xprt_request_retransmit_after_disconnect(struct rpc_task *task)
 714{
 715	struct rpc_rqst *req = task->tk_rqstp;
 716	struct rpc_xprt *xprt = req->rq_xprt;
 717
 718	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
 719		!xprt_connected(xprt);
 720}
 721
 722/**
 723 * xprt_conditional_disconnect - force a transport to disconnect
 724 * @xprt: transport to disconnect
 725 * @cookie: 'connection cookie'
 726 *
 727 * This attempts to break the connection if and only if 'cookie' matches
 728 * the current transport 'connection cookie'. It ensures that we don't
 729 * try to break the connection more than once when we need to retransmit
 730 * a batch of RPC requests.
 731 *
 732 */
 733void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
 734{
 735	/* Don't race with the test_bit() in xprt_clear_locked() */
 736	spin_lock(&xprt->transport_lock);
 737	if (cookie != xprt->connect_cookie)
 738		goto out;
 739	if (test_bit(XPRT_CLOSING, &xprt->state))
 740		goto out;
 741	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
 742	/* Try to schedule an autoclose RPC call */
 743	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
 744		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 745	xprt_wake_pending_tasks(xprt, -EAGAIN);
 746out:
 747	spin_unlock(&xprt->transport_lock);
 748}
 749
 750static bool
 751xprt_has_timer(const struct rpc_xprt *xprt)
 752{
 753	return xprt->idle_timeout != 0;
 754}
 755
 756static void
 757xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
 758	__must_hold(&xprt->transport_lock)
 759{
 760	xprt->last_used = jiffies;
 761	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
 762		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
 763}
 764
 765static void
 766xprt_init_autodisconnect(struct timer_list *t)
 767{
 768	struct rpc_xprt *xprt = from_timer(xprt, t, timer);
 769
 770	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
 771		return;
 772	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
 773	xprt->last_used = jiffies;
 774	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 775		return;
 776	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 777}
 778
 779bool xprt_lock_connect(struct rpc_xprt *xprt,
 780		struct rpc_task *task,
 781		void *cookie)
 782{
 783	bool ret = false;
 784
 785	spin_lock(&xprt->transport_lock);
 786	if (!test_bit(XPRT_LOCKED, &xprt->state))
 787		goto out;
 788	if (xprt->snd_task != task)
 789		goto out;
 790	xprt->snd_task = cookie;
 791	ret = true;
 792out:
 793	spin_unlock(&xprt->transport_lock);
 794	return ret;
 795}
 796
 797void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
 798{
 799	spin_lock(&xprt->transport_lock);
 800	if (xprt->snd_task != cookie)
 801		goto out;
 802	if (!test_bit(XPRT_LOCKED, &xprt->state))
 803		goto out;
 804	xprt->snd_task = NULL;
 805	xprt->ops->release_xprt(xprt, NULL);
 806	xprt_schedule_autodisconnect(xprt);
 807out:
 808	spin_unlock(&xprt->transport_lock);
 809	wake_up_bit(&xprt->state, XPRT_LOCKED);
 810}
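/*
 * Sketch of the intended pairing, as a transport's connect worker might
 * use it (the worker itself is hypothetical):
 *
 *	if (!xprt_lock_connect(xprt, task, cookie))
 *		return;
 *	// ... establish the connection ...
 *	xprt_unlock_connect(xprt, cookie);
 */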
 811
 812/**
 813 * xprt_connect - schedule a transport connect operation
 814 * @task: RPC task that is requesting the connect
 815 *
 816 */
 817void xprt_connect(struct rpc_task *task)
 818{
 819	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
 820
 821	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
 822			xprt, (xprt_connected(xprt) ? "is" : "is not"));
 823
 824	if (!xprt_bound(xprt)) {
 825		task->tk_status = -EAGAIN;
 826		return;
 827	}
 828	if (!xprt_lock_write(xprt, task))
 829		return;
 830
 831	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
 832		xprt->ops->close(xprt);
 833
 834	if (!xprt_connected(xprt)) {
 835		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
 836		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
 837				xprt_request_timeout(task->tk_rqstp));
 838
 839		if (test_bit(XPRT_CLOSING, &xprt->state))
 840			return;
 841		if (xprt_test_and_set_connecting(xprt))
 842			return;
 843		/* Race breaker */
 844		if (!xprt_connected(xprt)) {
 845			xprt->stat.connect_start = jiffies;
 846			xprt->ops->connect(xprt, task);
 847		} else {
 848			xprt_clear_connecting(xprt);
 849			task->tk_status = 0;
 850			rpc_wake_up_queued_task(&xprt->pending, task);
 851		}
 852	}
 853	xprt_release_write(xprt, task);
 854}
 855
 856/**
 857 * xprt_reconnect_delay - compute the wait before scheduling a connect
 858 * @xprt: transport instance
 859 *
 860 */
 861unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
 862{
 863	unsigned long start, now = jiffies;
 864
 865	start = xprt->stat.connect_start + xprt->reestablish_timeout;
 866	if (time_after(start, now))
 867		return start - now;
 868	return 0;
 869}
 870EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
 871
 872/**
 873 * xprt_reconnect_backoff - compute the new re-establish timeout
 874 * @xprt: transport instance
 875 * @init_to: initial reestablish timeout
 876 *
 877 */
 878void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
 879{
 880	xprt->reestablish_timeout <<= 1;
 881	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
 882		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
 883	if (xprt->reestablish_timeout < init_to)
 884		xprt->reestablish_timeout = init_to;
 885}
 886EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
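/*
 * Worked example: starting from a hypothetical init_to of 3s, repeated
 * backoff calls double reestablish_timeout to 6s, 12s, 24s, ... until
 * it is clamped at max_reconnect_timeout; xprt_reconnect_delay() then
 * reports how much of the current interval remains before the next
 * connect attempt.
 */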
 887
 888enum xprt_xid_rb_cmp {
 889	XID_RB_EQUAL,
 890	XID_RB_LEFT,
 891	XID_RB_RIGHT,
 892};
 893static enum xprt_xid_rb_cmp
 894xprt_xid_cmp(__be32 xid1, __be32 xid2)
 895{
 896	if (xid1 == xid2)
 897		return XID_RB_EQUAL;
 898	if ((__force u32)xid1 < (__force u32)xid2)
 899		return XID_RB_LEFT;
 900	return XID_RB_RIGHT;
 901}
 902
 903static struct rpc_rqst *
 904xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
 905{
 906	struct rb_node *n = xprt->recv_queue.rb_node;
 907	struct rpc_rqst *req;
 908
 909	while (n != NULL) {
 910		req = rb_entry(n, struct rpc_rqst, rq_recv);
 911		switch (xprt_xid_cmp(xid, req->rq_xid)) {
 912		case XID_RB_LEFT:
 913			n = n->rb_left;
 914			break;
 915		case XID_RB_RIGHT:
 916			n = n->rb_right;
 917			break;
 918		case XID_RB_EQUAL:
 919			return req;
 920		}
 921	}
 922	return NULL;
 923}
 924
 925static void
 926xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
 927{
 928	struct rb_node **p = &xprt->recv_queue.rb_node;
 929	struct rb_node *n = NULL;
 930	struct rpc_rqst *req;
 931
 932	while (*p != NULL) {
 933		n = *p;
 934		req = rb_entry(n, struct rpc_rqst, rq_recv);
 935		switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
 936		case XID_RB_LEFT:
 937			p = &n->rb_left;
 938			break;
 939		case XID_RB_RIGHT:
 940			p = &n->rb_right;
 941			break;
 942		case XID_RB_EQUAL:
 943			WARN_ON_ONCE(new != req);
 944			return;
 945		}
 946	}
 947	rb_link_node(&new->rq_recv, n, p);
 948	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
 949}
 950
 951static void
 952xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
 953{
 954	rb_erase(&req->rq_recv, &xprt->recv_queue);
 955}
 956
 957/**
 958 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 959 * @xprt: transport on which the original request was transmitted
 960 * @xid: RPC XID of incoming reply
 961 *
 962 * Caller holds xprt->queue_lock.
 963 */
 964struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
 965{
 966	struct rpc_rqst *entry;
 967
 968	entry = xprt_request_rb_find(xprt, xid);
 969	if (entry != NULL) {
 970		trace_xprt_lookup_rqst(xprt, xid, 0);
 971		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
 972		return entry;
 973	}
 974
 975	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
 976			ntohl(xid));
 977	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
 978	xprt->stat.bad_xids++;
 979	return NULL;
 980}
 981EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
 982
 983static bool
 984xprt_is_pinned_rqst(struct rpc_rqst *req)
 985{
 986	return atomic_read(&req->rq_pin) != 0;
 987}
 988
 989/**
 990 * xprt_pin_rqst - Pin a request on the transport receive list
 991 * @req: Request to pin
 992 *
 993 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 994 * so should be holding xprt->queue_lock.
 995 */
 996void xprt_pin_rqst(struct rpc_rqst *req)
 997{
 998	atomic_inc(&req->rq_pin);
 999}
1000EXPORT_SYMBOL_GPL(xprt_pin_rqst);
1001
1002/**
1003 * xprt_unpin_rqst - Unpin a request on the transport receive list
1004 * @req: Request to unpin
1005 *
1006 * Caller should be holding xprt->queue_lock.
1007 */
1008void xprt_unpin_rqst(struct rpc_rqst *req)
1009{
1010	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
1011		atomic_dec(&req->rq_pin);
1012		return;
1013	}
1014	if (atomic_dec_and_test(&req->rq_pin))
1015		wake_up_var(&req->rq_pin);
1016}
1017EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
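/*
 * Typical reply-handler pattern (a sketch; "copied" stands for whatever
 * the transport copied into the receive buffer): look the request up
 * and pin it under the queue lock, drop the lock while copying data,
 * then re-take it to complete and unpin:
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req) {
 *		spin_unlock(&xprt->queue_lock);
 *		return;
 *	}
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *	// ... copy the reply into req->rq_private_buf ...
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */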
1018
1019static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
1020{
1021	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
1022}
1023
1024static bool
1025xprt_request_data_received(struct rpc_task *task)
1026{
1027	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1028		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
1029}
1030
1031static bool
1032xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
1033{
1034	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1035		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
1036}
1037
1038/**
1039 * xprt_request_enqueue_receive - Add a request to the receive queue
1040 * @task: RPC task
1041 *
1042 */
1043void
1044xprt_request_enqueue_receive(struct rpc_task *task)
1045{
1046	struct rpc_rqst *req = task->tk_rqstp;
1047	struct rpc_xprt *xprt = req->rq_xprt;
1048
1049	if (!xprt_request_need_enqueue_receive(task, req))
1050		return;
1051
1052	xprt_request_prepare(task->tk_rqstp);
1053	spin_lock(&xprt->queue_lock);
1054
1055	/* Update the softirq receive buffer */
1056	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1057			sizeof(req->rq_private_buf));
1058
1059	/* Add request to the receive list */
1060	xprt_request_rb_insert(xprt, req);
1061	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1062	spin_unlock(&xprt->queue_lock);
1063
1064	/* Turn off autodisconnect */
1065	del_singleshot_timer_sync(&xprt->timer);
1066}
1067
1068/**
1069 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1070 * @task: RPC task
1071 *
1072 * Caller must hold xprt->queue_lock.
1073 */
1074static void
1075xprt_request_dequeue_receive_locked(struct rpc_task *task)
1076{
1077	struct rpc_rqst *req = task->tk_rqstp;
1078
1079	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1080		xprt_request_rb_remove(req->rq_xprt, req);
1081}
1082
1083/**
1084 * xprt_update_rtt - Update RPC RTT statistics
1085 * @task: RPC request that recently completed
1086 *
1087 * Caller holds xprt->queue_lock.
1088 */
1089void xprt_update_rtt(struct rpc_task *task)
1090{
1091	struct rpc_rqst *req = task->tk_rqstp;
1092	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1093	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1094	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1095
1096	if (timer) {
1097		if (req->rq_ntrans == 1)
1098			rpc_update_rtt(rtt, timer, m);
1099		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1100	}
1101}
1102EXPORT_SYMBOL_GPL(xprt_update_rtt);
1103
1104/**
1105 * xprt_complete_rqst - called when reply processing is complete
1106 * @task: RPC request that recently completed
1107 * @copied: actual number of bytes received from the transport
1108 *
1109 * Caller holds xprt->queue_lock.
1110 */
1111void xprt_complete_rqst(struct rpc_task *task, int copied)
1112{
1113	struct rpc_rqst *req = task->tk_rqstp;
1114	struct rpc_xprt *xprt = req->rq_xprt;
1115
1116	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
1117			task->tk_pid, ntohl(req->rq_xid), copied);
1118	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);
1119
1120	xprt->stat.recvs++;
1121
1122	req->rq_private_buf.len = copied;
1123	/* Ensure all writes are done before we update */
1124	/* req->rq_reply_bytes_recvd */
1125	smp_wmb();
1126	req->rq_reply_bytes_recvd = copied;
1127	xprt_request_dequeue_receive_locked(task);
1128	rpc_wake_up_queued_task(&xprt->pending, task);
1129}
1130EXPORT_SYMBOL_GPL(xprt_complete_rqst);
1131
1132static void xprt_timer(struct rpc_task *task)
1133{
1134	struct rpc_rqst *req = task->tk_rqstp;
1135	struct rpc_xprt *xprt = req->rq_xprt;
1136
1137	if (task->tk_status != -ETIMEDOUT)
1138		return;
1139
1140	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1141	if (!req->rq_reply_bytes_recvd) {
1142		if (xprt->ops->timer)
1143			xprt->ops->timer(xprt, task);
1144	} else
1145		task->tk_status = 0;
1146}
1147
1148/**
1149 * xprt_wait_for_reply_request_def - wait for reply
1150 * @task: pointer to rpc_task
1151 *
1152 * Set a request's retransmit timeout based on the transport's
1153 * default timeout parameters.  Used by transports that don't adjust
1154 * the retransmit timeout based on round-trip time estimation,
1155 * and put the task to sleep on the pending queue.
1156 */
1157void xprt_wait_for_reply_request_def(struct rpc_task *task)
1158{
1159	struct rpc_rqst *req = task->tk_rqstp;
1160
1161	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1162			xprt_request_timeout(req));
1163}
1164EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1165
1166/**
1167 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1168 * @task: pointer to rpc_task
1169 *
1170 * Set a request's retransmit timeout using the RTT estimator,
1171 * and put the task to sleep on the pending queue.
1172 */
1173void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1174{
1175	int timer = task->tk_msg.rpc_proc->p_timer;
1176	struct rpc_clnt *clnt = task->tk_client;
1177	struct rpc_rtt *rtt = clnt->cl_rtt;
1178	struct rpc_rqst *req = task->tk_rqstp;
1179	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1180	unsigned long timeout;
1181
1182	timeout = rpc_calc_rto(rtt, timer);
1183	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1184	if (timeout > max_timeout || timeout == 0)
1185		timeout = max_timeout;
1186	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1187			jiffies + timeout);
1188}
1189EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1190
1191/**
1192 * xprt_request_wait_receive - wait for the reply to an RPC request
1193 * @task: RPC task about to send a request
1194 *
1195 */
1196void xprt_request_wait_receive(struct rpc_task *task)
1197{
1198	struct rpc_rqst *req = task->tk_rqstp;
1199	struct rpc_xprt *xprt = req->rq_xprt;
1200
1201	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1202		return;
1203	/*
1204	 * Sleep on the pending queue if we're expecting a reply.
1205	 * The spinlock ensures atomicity between the test of
1206	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1207	 */
1208	spin_lock(&xprt->queue_lock);
1209	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1210		xprt->ops->wait_for_reply_request(task);
1211		/*
1212		 * Send an extra queue wakeup call if the
1213		 * connection was dropped in case the call to
1214		 * rpc_sleep_on() raced.
1215		 */
1216		if (xprt_request_retransmit_after_disconnect(task))
1217			rpc_wake_up_queued_task_set_status(&xprt->pending,
1218					task, -ENOTCONN);
1219	}
1220	spin_unlock(&xprt->queue_lock);
1221}
1222
1223static bool
1224xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1225{
1226	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1227}
1228
1229/**
1230 * xprt_request_enqueue_transmit - queue a task for transmission
1231 * @task: pointer to rpc_task
1232 *
1233 * Add a task to the transmission queue.
1234 */
1235void
1236xprt_request_enqueue_transmit(struct rpc_task *task)
1237{
1238	struct rpc_rqst *pos, *req = task->tk_rqstp;
1239	struct rpc_xprt *xprt = req->rq_xprt;
1240
1241	if (xprt_request_need_enqueue_transmit(task, req)) {
1242		req->rq_bytes_sent = 0;
1243		spin_lock(&xprt->queue_lock);
1244		/*
1245		 * Requests that carry congestion control credits are added
1246		 * to the head of the list to avoid starvation issues.
1247		 */
1248		if (req->rq_cong) {
1249			xprt_clear_congestion_window_wait(xprt);
1250			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1251				if (pos->rq_cong)
1252					continue;
1253				/* Note: req is added _before_ pos */
1254				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1255				INIT_LIST_HEAD(&req->rq_xmit2);
1256				trace_xprt_enq_xmit(task, 1);
1257				goto out;
1258			}
1259		} else if (RPC_IS_SWAPPER(task)) {
1260			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1261				if (pos->rq_cong || pos->rq_bytes_sent)
1262					continue;
1263				if (RPC_IS_SWAPPER(pos->rq_task))
1264					continue;
1265				/* Note: req is added _before_ pos */
1266				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1267				INIT_LIST_HEAD(&req->rq_xmit2);
1268				trace_xprt_enq_xmit(task, 2);
1269				goto out;
1270			}
1271		} else if (!req->rq_seqno) {
1272			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1273				if (pos->rq_task->tk_owner != task->tk_owner)
1274					continue;
1275				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1276				INIT_LIST_HEAD(&req->rq_xmit);
1277				trace_xprt_enq_xmit(task, 3);
1278				goto out;
1279			}
1280		}
1281		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1282		INIT_LIST_HEAD(&req->rq_xmit2);
1283		trace_xprt_enq_xmit(task, 4);
1284out:
1285		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1286		spin_unlock(&xprt->queue_lock);
1287	}
1288}
1289
1290/**
1291 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1292 * @task: pointer to rpc_task
1293 *
1294 * Remove a task from the transmission queue
1295 * Caller must hold xprt->queue_lock
1296 */
1297static void
1298xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1299{
1300	struct rpc_rqst *req = task->tk_rqstp;
1301
1302	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1303		return;
1304	if (!list_empty(&req->rq_xmit)) {
1305		list_del(&req->rq_xmit);
1306		if (!list_empty(&req->rq_xmit2)) {
1307			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1308					struct rpc_rqst, rq_xmit2);
1309			list_del(&req->rq_xmit2);
1310			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1311		}
1312	} else
1313		list_del(&req->rq_xmit2);
1314}
1315
1316/**
1317 * xprt_request_dequeue_transmit - remove a task from the transmission queue
1318 * @task: pointer to rpc_task
1319 *
1320 * Remove a task from the transmission queue
1321 */
1322static void
1323xprt_request_dequeue_transmit(struct rpc_task *task)
1324{
1325	struct rpc_rqst *req = task->tk_rqstp;
1326	struct rpc_xprt *xprt = req->rq_xprt;
1327
1328	spin_lock(&xprt->queue_lock);
1329	xprt_request_dequeue_transmit_locked(task);
1330	spin_unlock(&xprt->queue_lock);
1331}
1332
1333/**
1334 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1335 * @task: pointer to rpc_task
1336 *
1337 * Remove a task from the transmit and receive queues, and ensure that
1338 * it is not pinned by the receive work item.
1339 */
1340void
1341xprt_request_dequeue_xprt(struct rpc_task *task)
1342{
1343	struct rpc_rqst	*req = task->tk_rqstp;
1344	struct rpc_xprt *xprt = req->rq_xprt;
1345
1346	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1347	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1348	    xprt_is_pinned_rqst(req)) {
1349		spin_lock(&xprt->queue_lock);
1350		xprt_request_dequeue_transmit_locked(task);
1351		xprt_request_dequeue_receive_locked(task);
1352		while (xprt_is_pinned_rqst(req)) {
1353			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1354			spin_unlock(&xprt->queue_lock);
1355			xprt_wait_on_pinned_rqst(req);
1356			spin_lock(&xprt->queue_lock);
1357			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1358		}
1359		spin_unlock(&xprt->queue_lock);
1360	}
1361}
1362
1363/**
1364 * xprt_request_prepare - prepare an encoded request for transport
1365 * @req: pointer to rpc_rqst
1366 *
1367 * Calls into the transport layer to do whatever is needed to prepare
1368 * the request for transmission or receive.
1369 */
1370void
1371xprt_request_prepare(struct rpc_rqst *req)
1372{
1373	struct rpc_xprt *xprt = req->rq_xprt;
1374
1375	if (xprt->ops->prepare_request)
1376		xprt->ops->prepare_request(req);
1377}
1378
1379/**
1380 * xprt_request_need_retransmit - Test if a task needs retransmission
1381 * @task: pointer to rpc_task
1382 *
1383 * Test for whether a connection breakage requires the task to retransmit
1384 */
1385bool
1386xprt_request_need_retransmit(struct rpc_task *task)
1387{
1388	return xprt_request_retransmit_after_disconnect(task);
1389}
1390
1391/**
1392 * xprt_prepare_transmit - reserve the transport before sending a request
1393 * @task: RPC task about to send a request
1394 *
1395 */
1396bool xprt_prepare_transmit(struct rpc_task *task)
1397{
1398	struct rpc_rqst	*req = task->tk_rqstp;
1399	struct rpc_xprt	*xprt = req->rq_xprt;
1400
1401	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
1402
1403	if (!xprt_lock_write(xprt, task)) {
1404		/* Race breaker: someone may have transmitted us */
1405		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1406			rpc_wake_up_queued_task_set_status(&xprt->sending,
1407					task, 0);
1408		return false;
1409
1410	}
1411	return true;
1412}
1413
1414void xprt_end_transmit(struct rpc_task *task)
1415{
1416	xprt_release_write(task->tk_rqstp->rq_xprt, task);
1417}
1418
1419/**
1420 * xprt_request_transmit - send an RPC request on a transport
1421 * @req: pointer to request to transmit
1422 * @snd_task: RPC task that owns the transport lock
1423 *
1424 * This performs the transmission of a single request.
1425 * Note that if the request is not the same as snd_task, then it
1426 * does need to be pinned.
1427 * Returns '0' on success.
1428 */
1429static int
1430xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1431{
1432	struct rpc_xprt *xprt = req->rq_xprt;
1433	struct rpc_task *task = req->rq_task;
1434	unsigned int connect_cookie;
1435	int is_retrans = RPC_WAS_SENT(task);
1436	int status;
1437
1438	if (!req->rq_bytes_sent) {
1439		if (xprt_request_data_received(task)) {
1440			status = 0;
1441			goto out_dequeue;
1442		}
1443		/* Verify that our message lies in the RPCSEC_GSS window */
1444		if (rpcauth_xmit_need_reencode(task)) {
1445			status = -EBADMSG;
1446			goto out_dequeue;
1447		}
1448		if (RPC_SIGNALLED(task)) {
1449			status = -ERESTARTSYS;
1450			goto out_dequeue;
1451		}
1452	}
1453
1454	/*
1455	 * Update req->rq_ntrans before transmitting to avoid races with
1456	 * xprt_update_rtt(), which needs to know that it is recording a
1457	 * reply to the first transmission.
1458	 */
1459	req->rq_ntrans++;
1460
1461	connect_cookie = xprt->connect_cookie;
1462	status = xprt->ops->send_request(req);
1463	if (status != 0) {
1464		req->rq_ntrans--;
1465		trace_xprt_transmit(req, status);
1466		return status;
1467	}
1468
1469	if (is_retrans)
1470		task->tk_client->cl_stats->rpcretrans++;
1471
1472	xprt_inject_disconnect(xprt);
1473
1474	task->tk_flags |= RPC_TASK_SENT;
1475	spin_lock(&xprt->transport_lock);
1476
1477	xprt->stat.sends++;
1478	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1479	xprt->stat.bklog_u += xprt->backlog.qlen;
1480	xprt->stat.sending_u += xprt->sending.qlen;
1481	xprt->stat.pending_u += xprt->pending.qlen;
1482	spin_unlock(&xprt->transport_lock);
1483
1484	req->rq_connect_cookie = connect_cookie;
1485out_dequeue:
1486	trace_xprt_transmit(req, status);
1487	xprt_request_dequeue_transmit(task);
1488	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1489	return status;
1490}
1491
1492/**
1493 * xprt_transmit - send an RPC request on a transport
1494 * @task: controlling RPC task
1495 *
1496 * Attempts to drain the transmit queue. On exit, either the transport
1497 * signalled an error that needs to be handled before transmission can
1498 * resume, or @task finished transmitting, and detected that it already
1499 * received a reply.
1500 */
1501void
1502xprt_transmit(struct rpc_task *task)
1503{
1504	struct rpc_rqst *next, *req = task->tk_rqstp;
1505	struct rpc_xprt	*xprt = req->rq_xprt;
1506	int status;
1507
1508	spin_lock(&xprt->queue_lock);
1509	while (!list_empty(&xprt->xmit_queue)) {
1510		next = list_first_entry(&xprt->xmit_queue,
1511				struct rpc_rqst, rq_xmit);
1512		xprt_pin_rqst(next);
1513		spin_unlock(&xprt->queue_lock);
1514		status = xprt_request_transmit(next, task);
1515		if (status == -EBADMSG && next != req)
1516			status = 0;
1517		cond_resched();
1518		spin_lock(&xprt->queue_lock);
1519		xprt_unpin_rqst(next);
1520		if (status == 0) {
1521			if (!xprt_request_data_received(task) ||
1522			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1523				continue;
1524		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1525			task->tk_status = status;
1526		break;
1527	}
1528	spin_unlock(&xprt->queue_lock);
1529}
1530
1531static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1532{
1533	set_bit(XPRT_CONGESTED, &xprt->state);
1534	rpc_sleep_on(&xprt->backlog, task, NULL);
1535}
1536
1537static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
1538{
1539	if (rpc_wake_up_next(&xprt->backlog) == NULL)
1540		clear_bit(XPRT_CONGESTED, &xprt->state);
1541}
1542
1543static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1544{
1545	bool ret = false;
1546
1547	if (!test_bit(XPRT_CONGESTED, &xprt->state))
1548		goto out;
1549	spin_lock(&xprt->reserve_lock);
1550	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1551		rpc_sleep_on(&xprt->backlog, task, NULL);
1552		ret = true;
1553	}
1554	spin_unlock(&xprt->reserve_lock);
1555out:
1556	return ret;
1557}
1558
1559static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1560{
1561	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1562
1563	if (xprt->num_reqs >= xprt->max_reqs)
1564		goto out;
1565	++xprt->num_reqs;
1566	spin_unlock(&xprt->reserve_lock);
1567	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
1568	spin_lock(&xprt->reserve_lock);
1569	if (req != NULL)
1570		goto out;
1571	--xprt->num_reqs;
1572	req = ERR_PTR(-ENOMEM);
1573out:
1574	return req;
1575}
1576
1577static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1578{
1579	if (xprt->num_reqs > xprt->min_reqs) {
1580		--xprt->num_reqs;
1581		kfree(req);
1582		return true;
1583	}
1584	return false;
1585}
1586
1587void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1588{
1589	struct rpc_rqst *req;
1590
1591	spin_lock(&xprt->reserve_lock);
1592	if (!list_empty(&xprt->free)) {
1593		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1594		list_del(&req->rq_list);
1595		goto out_init_req;
1596	}
1597	req = xprt_dynamic_alloc_slot(xprt);
1598	if (!IS_ERR(req))
1599		goto out_init_req;
1600	switch (PTR_ERR(req)) {
1601	case -ENOMEM:
1602		dprintk("RPC:       dynamic allocation of request slot "
1603				"failed! Retrying\n");
1604		task->tk_status = -ENOMEM;
1605		break;
1606	case -EAGAIN:
1607		xprt_add_backlog(xprt, task);
1608		dprintk("RPC:       waiting for request slot\n");
1609		/* fall through */
1610	default:
1611		task->tk_status = -EAGAIN;
1612	}
1613	spin_unlock(&xprt->reserve_lock);
1614	return;
1615out_init_req:
1616	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1617				     xprt->num_reqs);
1618	spin_unlock(&xprt->reserve_lock);
1619
1620	task->tk_status = 0;
1621	task->tk_rqstp = req;
1622}
1623EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1624
1625void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1626{
1627	spin_lock(&xprt->reserve_lock);
1628	if (!xprt_dynamic_free_slot(xprt, req)) {
1629		memset(req, 0, sizeof(*req));	/* mark unused */
1630		list_add(&req->rq_list, &xprt->free);
1631	}
1632	xprt_wake_up_backlog(xprt);
1633	spin_unlock(&xprt->reserve_lock);
1634}
1635EXPORT_SYMBOL_GPL(xprt_free_slot);
1636
1637static void xprt_free_all_slots(struct rpc_xprt *xprt)
1638{
1639	struct rpc_rqst *req;
1640	while (!list_empty(&xprt->free)) {
1641		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1642		list_del(&req->rq_list);
1643		kfree(req);
1644	}
1645}
1646
1647struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1648		unsigned int num_prealloc,
1649		unsigned int max_alloc)
1650{
1651	struct rpc_xprt *xprt;
1652	struct rpc_rqst *req;
1653	int i;
1654
1655	xprt = kzalloc(size, GFP_KERNEL);
1656	if (xprt == NULL)
1657		goto out;
1658
1659	xprt_init(xprt, net);
1660
1661	for (i = 0; i < num_prealloc; i++) {
1662		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1663		if (!req)
1664			goto out_free;
1665		list_add(&req->rq_list, &xprt->free);
1666	}
1667	if (max_alloc > num_prealloc)
1668		xprt->max_reqs = max_alloc;
1669	else
1670		xprt->max_reqs = num_prealloc;
1671	xprt->min_reqs = num_prealloc;
1672	xprt->num_reqs = num_prealloc;
1673
1674	return xprt;
1675
1676out_free:
1677	xprt_free(xprt);
1678out:
1679	return NULL;
1680}
1681EXPORT_SYMBOL_GPL(xprt_alloc);
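/*
 * Example: a transport's setup routine typically embeds struct rpc_xprt
 * in a private structure and sizes the slot table here (a sketch;
 * "example_xprt" and the slot counts are placeholders):
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;
 *		// transport-private state follows
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  16, RPC_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */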
1682
1683void xprt_free(struct rpc_xprt *xprt)
1684{
1685	put_net(xprt->xprt_net);
1686	xprt_free_all_slots(xprt);
1687	kfree_rcu(xprt, rcu);
1688}
1689EXPORT_SYMBOL_GPL(xprt_free);
1690
1691static void
1692xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1693{
1694	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1695}
1696
1697static __be32
1698xprt_alloc_xid(struct rpc_xprt *xprt)
1699{
1700	__be32 xid;
1701
1702	spin_lock(&xprt->reserve_lock);
1703	xid = (__force __be32)xprt->xid++;
1704	spin_unlock(&xprt->reserve_lock);
1705	return xid;
1706}
1707
1708static void
1709xprt_init_xid(struct rpc_xprt *xprt)
1710{
1711	xprt->xid = prandom_u32();
1712}
1713
1714static void
1715xprt_request_init(struct rpc_task *task)
1716{
1717	struct rpc_xprt *xprt = task->tk_xprt;
1718	struct rpc_rqst	*req = task->tk_rqstp;
1719
1720	req->rq_task	= task;
1721	req->rq_xprt    = xprt;
1722	req->rq_buffer  = NULL;
1723	req->rq_xid	= xprt_alloc_xid(xprt);
1724	xprt_init_connect_cookie(req, xprt);
1725	req->rq_snd_buf.len = 0;
1726	req->rq_snd_buf.buflen = 0;
1727	req->rq_rcv_buf.len = 0;
1728	req->rq_rcv_buf.buflen = 0;
1729	req->rq_snd_buf.bvec = NULL;
1730	req->rq_rcv_buf.bvec = NULL;
1731	req->rq_release_snd_buf = NULL;
1732	xprt_init_majortimeo(task, req);
1733	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
1734			req, ntohl(req->rq_xid));
1735}
1736
1737static void
1738xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1739{
1740	xprt->ops->alloc_slot(xprt, task);
1741	if (task->tk_rqstp != NULL)
1742		xprt_request_init(task);
1743}
1744
1745/**
1746 * xprt_reserve - allocate an RPC request slot
1747 * @task: RPC task requesting a slot allocation
1748 *
1749 * If the transport is marked as being congested, or if no more
1750 * slots are available, place the task on the transport's
1751 * backlog queue.
1752 */
1753void xprt_reserve(struct rpc_task *task)
1754{
1755	struct rpc_xprt *xprt = task->tk_xprt;
1756
1757	task->tk_status = 0;
1758	if (task->tk_rqstp != NULL)
1759		return;
1760
1761	task->tk_status = -EAGAIN;
1762	if (!xprt_throttle_congested(xprt, task))
1763		xprt_do_reserve(xprt, task);
1764}
1765
1766/**
1767 * xprt_retry_reserve - allocate an RPC request slot
1768 * @task: RPC task requesting a slot allocation
1769 *
1770 * If no more slots are available, place the task on the transport's
1771 * backlog queue.
1772 * Note that the only difference with xprt_reserve is that we now
1773 * ignore the value of the XPRT_CONGESTED flag.
1774 */
1775void xprt_retry_reserve(struct rpc_task *task)
1776{
1777	struct rpc_xprt *xprt = task->tk_xprt;
1778
1779	task->tk_status = 0;
1780	if (task->tk_rqstp != NULL)
1781		return;
1782
1783	task->tk_status = -EAGAIN;
1784	xprt_do_reserve(xprt, task);
1785}
1786
1787/**
1788 * xprt_release - release an RPC request slot
1789 * @task: task which is finished with the slot
1790 *
1791 */
1792void xprt_release(struct rpc_task *task)
1793{
1794	struct rpc_xprt	*xprt;
1795	struct rpc_rqst	*req = task->tk_rqstp;
1796
1797	if (req == NULL) {
1798		if (task->tk_client) {
1799			xprt = task->tk_xprt;
1800			xprt_release_write(xprt, task);
1801		}
1802		return;
1803	}
1804
1805	xprt = req->rq_xprt;
1806	xprt_request_dequeue_xprt(task);
1807	spin_lock(&xprt->transport_lock);
1808	xprt->ops->release_xprt(xprt, task);
1809	if (xprt->ops->release_request)
1810		xprt->ops->release_request(task);
1811	xprt_schedule_autodisconnect(xprt);
1812	spin_unlock(&xprt->transport_lock);
1813	if (req->rq_buffer)
1814		xprt->ops->buf_free(task);
1815	xprt_inject_disconnect(xprt);
1816	xdr_free_bvec(&req->rq_rcv_buf);
1817	xdr_free_bvec(&req->rq_snd_buf);
1818	if (req->rq_cred != NULL)
1819		put_rpccred(req->rq_cred);
1820	task->tk_rqstp = NULL;
1821	if (req->rq_release_snd_buf)
1822		req->rq_release_snd_buf(req);
1823
1824	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1825	if (likely(!bc_prealloc(req)))
1826		xprt->ops->free_slot(xprt, req);
1827	else
1828		xprt_free_bc_request(req);
1829}
1830
1831#ifdef CONFIG_SUNRPC_BACKCHANNEL
1832void
1833xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1834{
1835	struct xdr_buf *xbufp = &req->rq_snd_buf;
1836
1837	task->tk_rqstp = req;
1838	req->rq_task = task;
1839	xprt_init_connect_cookie(req, req->rq_xprt);
1840	/*
1841	 * Set up the xdr_buf length.
1842	 * This also indicates that the buffer is XDR encoded already.
1843	 */
1844	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1845		xbufp->tail[0].iov_len;
1846}
1847#endif
1848
1849static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1850{
1851	kref_init(&xprt->kref);
1852
1853	spin_lock_init(&xprt->transport_lock);
1854	spin_lock_init(&xprt->reserve_lock);
1855	spin_lock_init(&xprt->queue_lock);
1856
1857	INIT_LIST_HEAD(&xprt->free);
1858	xprt->recv_queue = RB_ROOT;
1859	INIT_LIST_HEAD(&xprt->xmit_queue);
1860#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1861	spin_lock_init(&xprt->bc_pa_lock);
1862	INIT_LIST_HEAD(&xprt->bc_pa_list);
1863#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1864	INIT_LIST_HEAD(&xprt->xprt_switch);
1865
1866	xprt->last_used = jiffies;
1867	xprt->cwnd = RPC_INITCWND;
1868	xprt->bind_index = 0;
1869
1870	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1871	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1872	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1873	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1874
1875	xprt_init_xid(xprt);
1876
1877	xprt->xprt_net = get_net(net);
1878}
1879
1880/**
1881 * xprt_create_transport - create an RPC transport
1882 * @args: rpc transport creation arguments
1883 *
1884 */
1885struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1886{
1887	struct rpc_xprt	*xprt;
1888	struct xprt_class *t;
1889
1890	spin_lock(&xprt_list_lock);
1891	list_for_each_entry(t, &xprt_list, list) {
1892		if (t->ident == args->ident) {
1893			spin_unlock(&xprt_list_lock);
1894			goto found;
1895		}
1896	}
1897	spin_unlock(&xprt_list_lock);
1898	dprintk("RPC: transport (%d) not supported\n", args->ident);
1899	return ERR_PTR(-EIO);
1900
1901found:
1902	xprt = t->setup(args);
1903	if (IS_ERR(xprt)) {
1904		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
1905				-PTR_ERR(xprt));
1906		goto out;
1907	}
1908	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1909		xprt->idle_timeout = 0;
1910	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1911	if (xprt_has_timer(xprt))
1912		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1913	else
1914		timer_setup(&xprt->timer, NULL, 0);
1915
1916	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1917		xprt_destroy(xprt);
1918		return ERR_PTR(-EINVAL);
1919	}
1920	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1921	if (xprt->servername == NULL) {
1922		xprt_destroy(xprt);
1923		return ERR_PTR(-ENOMEM);
1924	}
1925
1926	rpc_xprt_debugfs_register(xprt);
1927
1928	dprintk("RPC:       created transport %p with %u slots\n", xprt,
1929			xprt->max_reqs);
1930out:
1931	return xprt;
1932}
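/*
 * Illustrative sketch (editorial addition): xprt_create_transport()
 * reports failure through the kernel's ERR_PTR convention - the error
 * code is encoded into the pointer value itself rather than returned as
 * a separate status alongside NULL. A freestanding model of the macros
 * involved (the *_model names are invented; the real ones live in
 * include/linux/err.h):
 */
#include <stdio.h>

#define MAX_ERRNO_MODEL	4095

static inline void *ERR_PTR_model(long error)
{
	return (void *)error;		/* e.g. -EIO becomes (void *)-5 */
}

static inline long PTR_ERR_model(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR_model(const void *ptr)
{
	/* Error pointers occupy the top page of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO_MODEL;
}

int main(void)
{
	void *xprt = ERR_PTR_model(-5);	/* transport lookup failed: -EIO */

	if (IS_ERR_model(xprt))
		printf("setup failed: %ld\n", PTR_ERR_model(xprt));
	return 0;
}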
1933
1934static void xprt_destroy_cb(struct work_struct *work)
1935{
1936	struct rpc_xprt *xprt =
1937		container_of(work, struct rpc_xprt, task_cleanup);
1938
1939	rpc_xprt_debugfs_unregister(xprt);
1940	rpc_destroy_wait_queue(&xprt->binding);
1941	rpc_destroy_wait_queue(&xprt->pending);
1942	rpc_destroy_wait_queue(&xprt->sending);
1943	rpc_destroy_wait_queue(&xprt->backlog);
1944	kfree(xprt->servername);
1945	/*
1946	 * Destroy any existing back channel
1947	 */
1948	xprt_destroy_backchannel(xprt, UINT_MAX);
1949
1950	/*
1951	 * Tear down transport state and free the rpc_xprt
1952	 */
1953	xprt->ops->destroy(xprt);
1954}
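/*
 * Illustrative sketch (editorial addition): xprt_destroy_cb() recovers
 * the enclosing rpc_xprt from the embedded work_struct via
 * container_of(), which just subtracts the member's offset from the
 * member's address. A self-contained demonstration with invented names
 * (container_of_model, model_xprt3, ...):
 */
#include <stddef.h>
#include <stdio.h>

#define container_of_model(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct model_work { int pending; };

struct model_xprt3 {
	int id;
	struct model_work task_cleanup;	/* embedded, like in rpc_xprt */
};

static void model_destroy_cb(struct model_work *work)
{
	struct model_xprt3 *x =
		container_of_model(work, struct model_xprt3, task_cleanup);

	printf("destroying transport %d\n", x->id);
}

int main(void)
{
	struct model_xprt3 x = { .id = 42 };

	model_destroy_cb(&x.task_cleanup);	/* "destroying transport 42" */
	return 0;
}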
1955
1956/**
1957 * xprt_destroy - destroy an RPC transport, killing off all requests.
1958 * @xprt: transport to destroy
1959 *
1960 */
1961static void xprt_destroy(struct rpc_xprt *xprt)
1962{
1963	dprintk("RPC:       destroying transport %p\n", xprt);
1964
1965	/*
1966	 * Exclude transport connect/disconnect handlers and autoclose
1967	 */
1968	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1969
1970	del_timer_sync(&xprt->timer);
1971
1972	/*
1973	 * Destroy sockets etc from the system workqueue so they can
1974	 * safely flush receive work running on rpciod.
1975	 */
1976	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
1977	schedule_work(&xprt->task_cleanup);
1978}
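/*
 * Illustrative sketch (editorial addition): xprt_destroy() reuses the
 * transport's task_cleanup work item - previously pointed at
 * xprt_autoclose() - by re-initialising it with xprt_destroy_cb() and
 * queueing it, so socket teardown runs later on a workqueue thread
 * instead of in the caller's context. A toy model of that handoff, with
 * invented names; a real workqueue would defer the call to a worker:
 */
#include <stdio.h>

struct model_work2 { void (*func)(struct model_work2 *); };

/* Stand-ins for the two callbacks the kernel code swaps between. */
static void model_autoclose(struct model_work2 *w)    { (void)w; printf("autoclose\n"); }
static void model_destroy_body(struct model_work2 *w) { (void)w; printf("destroy\n"); }

/* schedule_work() stand-in: here we just invoke the callback inline. */
static void model_schedule(struct model_work2 *w) { w->func(w); }

int main(void)
{
	struct model_work2 cleanup = { .func = model_autoclose };

	/* Like INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb): repoint
	 * the callback before queueing, so the same embedded work item
	 * now performs teardown instead of autoclose. */
	cleanup.func = model_destroy_body;
	model_schedule(&cleanup);	/* prints "destroy" */
	return 0;
}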
1979
1980static void xprt_destroy_kref(struct kref *kref)
1981{
1982	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
1983}
1984
1985/**
1986 * xprt_get - return a reference to an RPC transport.
1987 * @xprt: pointer to the transport
1988 *
1989 */
1990struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
1991{
1992	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
1993		return xprt;
1994	return NULL;
1995}
1996EXPORT_SYMBOL_GPL(xprt_get);
1997
1998/**
1999 * xprt_put - release a reference to an RPC transport.
2000 * @xprt: pointer to the transport
2001 *
2002 */
2003void xprt_put(struct rpc_xprt *xprt)
2004{
2005	if (xprt != NULL)
2006		kref_put(&xprt->kref, xprt_destroy_kref);
2007}
2008EXPORT_SYMBOL_GPL(xprt_put);
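/*
 * Illustrative sketch (editorial addition): xprt_get()/xprt_put() follow
 * the usual kref pattern - a get only succeeds while the count is
 * non-zero, and the final put triggers destruction. A minimal C11 model
 * using an atomic counter; model_get()/model_put() are invented names
 * mirroring kref_get_unless_zero()/kref_put():
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_obj {
	atomic_int refs;
};

/* Like kref_get_unless_zero(): refuse to resurrect a dying object. */
static bool model_get(struct model_obj *o)
{
	int r = atomic_load(&o->refs);

	while (r != 0)
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	return false;
}

/* Like kref_put(): run the release callback on the last reference. */
static void model_put(struct model_obj *o, void (*release)(struct model_obj *))
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		release(o);
}

static void model_release(struct model_obj *o)
{
	(void)o;
	printf("released\n");
}

int main(void)
{
	struct model_obj o = { .refs = 1 };

	if (model_get(&o))			/* second reference taken */
		model_put(&o, model_release);	/* count back to 1 */
	model_put(&o, model_release);		/* last put -> "released" */
	return 0;
}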