// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
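
/*
 * Illustrative sketch (not part of the original file): the steps above are
 * normally driven by the rpc_task state machine in net/sunrpc/clnt.c and
 * net/sunrpc/sched.c rather than called by hand. Conceptually, one pass of
 * a call maps onto this file's API roughly as follows; scheduler glue and
 * error handling are omitted, and this block is not compiled.
 */
#if 0
static void example_call_flow(struct rpc_task *task)
{
	xprt_reserve(task);			/* get a slot or sleep on the backlog */
	xprt_request_enqueue_receive(task);	/* a reply is expected */
	if (xprt_prepare_transmit(task)) {	/* take the transport write lock */
		xprt_request_enqueue_transmit(task);
		xprt_transmit(task);		/* drain the transmit queue */
		xprt_end_transmit(task);
	}
	xprt_request_wait_receive(task);	/* sleep until the reply XID matches */
	xprt_release(task);			/* free the slot, wake the backlog */
}
#endif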

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
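
/*
 * Illustrative sketch (not part of the original file): a transport module
 * typically registers a statically defined xprt_class from its module init
 * hook and unregisters it on exit. The "example" identifiers below are
 * hypothetical; a real module defines its own XPRT_TRANSPORT_* ident in
 * include/linux/sunrpc/xprt.h. The MODULE_ALIAS matches the
 * request_module("xprt%s", ...) call in xprt_load_transport() below, so
 * the module can be loaded on demand by name. This block is not compiled.
 */
#if 0
static struct xprt_class example_transport = {
	.list		= LIST_HEAD_INIT(example_transport.list),
	.name		= "example",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_EXAMPLE,	/* hypothetical ident */
	.setup		= example_setup,		/* hypothetical setup callback */
};

static int __init example_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_exit(void)
{
	xprt_unregister_transport(&example_transport);
}

MODULE_ALIAS("xprtexample");	/* found by xprt_load_transport("example") */
#endif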

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
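
/*
 * Illustrative sketch (not part of the original file): transports choose
 * a locking policy by plugging one of these helpers into their
 * rpc_xprt_ops. Stream transports typically use xprt_reserve_xprt /
 * xprt_release_xprt, while datagram transports pair the *_cong variants
 * below so the congestion window gates the lock; compare xs_tcp_ops and
 * xs_udp_ops in net/sunrpc/xprtsock.c. This block is not compiled.
 */
#if 0
static const struct rpc_xprt_ops example_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	/* ... remaining callbacks ... */
};
#endif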

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
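
/*
 * Worked example (added for illustration): with RPC_CWNDSCALE = 256, as
 * defined in include/linux/sunrpc/xprt.h, one "credit" is 256 units of
 * cwnd. At cwnd = 512, a reply arriving while the window is full adds
 * (256 * 256 + 256) / 512 = 128; at cwnd = 1024 the same event adds only
 * (256 * 256 + 512) / 1024 = 64, which is the 1/cwnd growth described
 * above. A timeout instead halves cwnd, bounded below by RPC_CWNDSCALE.
 */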

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
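
/*
 * Worked example (added for illustration; the timeout values are
 * hypothetical): with to_initval = 10 * HZ, to_retries = 2,
 * to_exponential = 0 and to_increment = 10 * HZ, the major timeout is
 * 10s + 10s * 2 = 30s. With to_exponential set instead, it would be
 * 10s << 2 = 40s. Either result is clamped to to_maxval.
 */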

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
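
/*
 * Illustrative sketch (not part of the original file): a connection-
 * oriented transport typically pairs the two helpers above in its
 * ->connect() method, waiting out the current re-establish timeout and
 * then doubling it for the next attempt, much as xs_connect() does in
 * net/sunrpc/xprtsock.c. The "example" identifiers and the initial
 * timeout constant are hypothetical; this block is not compiled.
 */
#if 0
#define EXAMPLE_INIT_REEST_TO	(3U * HZ)

static void example_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	unsigned long delay = 0;

	if (xprt_connected(xprt)) {
		/* Reconnect: wait out the current timeout, then back off */
		delay = xprt_reconnect_delay(xprt);
		xprt_reconnect_backoff(xprt, EXAMPLE_INIT_REEST_TO);
	}
	queue_delayed_work(xprtiod_workqueue, &example_connect_worker, delay);
}
#endif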

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
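
/*
 * Illustrative sketch (not part of the original file): the pin API above
 * lets a transport's receive path drop xprt->queue_lock while it copies
 * reply data, without the request disappearing underneath it. A receive
 * handler following the documented locking rules would look roughly like
 * this; example_copy_reply() is hypothetical and this block is not
 * compiled.
 */
#if 0
static void example_data_ready(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);	/* atomic with the pin */
	if (!req) {
		spin_unlock(&xprt->queue_lock);
		return;				/* late or bogus reply */
	}
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);

	example_copy_reply(req);		/* may sleep; req stays pinned */

	spin_lock(&xprt->queue_lock);
	xprt_update_rtt(req->rq_task);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->queue_lock);
}
#endif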

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
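
/*
 * Worked example (added for illustration; the estimator values are
 * hypothetical): if rpc_calc_rto() returns HZ / 2 for this procedure's
 * timer class, rpc_ntimeo() returns 0 and the request has already been
 * retried twice (rq_retries = 2), the next wait is (HZ / 2) << 2 = 2 * HZ.
 * Each further retry doubles the wait until it is clamped to the client's
 * to_maxval.
 */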

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
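
/*
 * Illustrative sketch (not part of the original file): a transport's
 * ->setup() callback typically embeds struct rpc_xprt in a larger private
 * structure and sizes the slot table via xprt_alloc(), much as
 * xs_setup_xprt() does in net/sunrpc/xprtsock.c. The "example" names and
 * slot counts are hypothetical; this block is not compiled.
 */
#if 0
struct example_xprt {
	struct rpc_xprt	xprt;		/* embedded generic transport state */
	/* transport-private fields follow */
};

static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
			  16,	/* preallocated slots */
			  128);	/* dynamic growth limit */
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	/* fill in xprt->ops, default timeouts and the server address here */
	return xprt;
}
#endif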
1682
1683void xprt_free(struct rpc_xprt *xprt)
1684{
1685	put_net(xprt->xprt_net);
1686	xprt_free_all_slots(xprt);
 
 
1687	kfree_rcu(xprt, rcu);
1688}
1689EXPORT_SYMBOL_GPL(xprt_free);
1690
1691static void
1692xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1693{
1694	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1695}
1696
1697static __be32
1698xprt_alloc_xid(struct rpc_xprt *xprt)
1699{
1700	__be32 xid;
1701
1702	spin_lock(&xprt->reserve_lock);
1703	xid = (__force __be32)xprt->xid++;
1704	spin_unlock(&xprt->reserve_lock);
1705	return xid;
1706}
1707
1708static void
1709xprt_init_xid(struct rpc_xprt *xprt)
1710{
1711	xprt->xid = prandom_u32();
1712}
1713
1714static void
1715xprt_request_init(struct rpc_task *task)
1716{
1717	struct rpc_xprt *xprt = task->tk_xprt;
1718	struct rpc_rqst	*req = task->tk_rqstp;
1719
1720	req->rq_task	= task;
1721	req->rq_xprt    = xprt;
1722	req->rq_buffer  = NULL;
1723	req->rq_xid	= xprt_alloc_xid(xprt);
1724	xprt_init_connect_cookie(req, xprt);
1725	req->rq_snd_buf.len = 0;
1726	req->rq_snd_buf.buflen = 0;
1727	req->rq_rcv_buf.len = 0;
1728	req->rq_rcv_buf.buflen = 0;
1729	req->rq_snd_buf.bvec = NULL;
1730	req->rq_rcv_buf.bvec = NULL;
1731	req->rq_release_snd_buf = NULL;
1732	xprt_init_majortimeo(task, req);
1733	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
1734			req, ntohl(req->rq_xid));
1735}
1736
1737static void
1738xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1739{
1740	xprt->ops->alloc_slot(xprt, task);
1741	if (task->tk_rqstp != NULL)
1742		xprt_request_init(task);
1743}
1744
1745/**
1746 * xprt_reserve - allocate an RPC request slot
1747 * @task: RPC task requesting a slot allocation
1748 *
1749 * If the transport is marked as being congested, or if no more
1750 * slots are available, place the task on the transport's
1751 * backlog queue.
1752 */
1753void xprt_reserve(struct rpc_task *task)
1754{
1755	struct rpc_xprt *xprt = task->tk_xprt;
1756
1757	task->tk_status = 0;
1758	if (task->tk_rqstp != NULL)
1759		return;
1760
1761	task->tk_status = -EAGAIN;
1762	if (!xprt_throttle_congested(xprt, task))
1763		xprt_do_reserve(xprt, task);
1764}
1765
1766/**
1767 * xprt_retry_reserve - allocate an RPC request slot
1768 * @task: RPC task requesting a slot allocation
1769 *
1770 * If no more slots are available, place the task on the transport's
1771 * backlog queue.
1772 * Note that the only difference with xprt_reserve is that we now
1773 * ignore the value of the XPRT_CONGESTED flag.
1774 */
1775void xprt_retry_reserve(struct rpc_task *task)
1776{
1777	struct rpc_xprt *xprt = task->tk_xprt;
1778
1779	task->tk_status = 0;
1780	if (task->tk_rqstp != NULL)
1781		return;
1782
1783	task->tk_status = -EAGAIN;
1784	xprt_do_reserve(xprt, task);
1785}
1786
1787/**
1788 * xprt_release - release an RPC request slot
1789 * @task: task which is finished with the slot
1790 *
1791 */
1792void xprt_release(struct rpc_task *task)
1793{
1794	struct rpc_xprt	*xprt;
1795	struct rpc_rqst	*req = task->tk_rqstp;
1796
1797	if (req == NULL) {
1798		if (task->tk_client) {
1799			xprt = task->tk_xprt;
1800			xprt_release_write(xprt, task);
1801		}
1802		return;
1803	}
1804
1805	xprt = req->rq_xprt;
1806	xprt_request_dequeue_xprt(task);
1807	spin_lock(&xprt->transport_lock);
1808	xprt->ops->release_xprt(xprt, task);
1809	if (xprt->ops->release_request)
1810		xprt->ops->release_request(task);
1811	xprt_schedule_autodisconnect(xprt);
1812	spin_unlock(&xprt->transport_lock);
1813	if (req->rq_buffer)
1814		xprt->ops->buf_free(task);
1815	xprt_inject_disconnect(xprt);
1816	xdr_free_bvec(&req->rq_rcv_buf);
1817	xdr_free_bvec(&req->rq_snd_buf);
1818	if (req->rq_cred != NULL)
1819		put_rpccred(req->rq_cred);
1820	task->tk_rqstp = NULL;
1821	if (req->rq_release_snd_buf)
1822		req->rq_release_snd_buf(req);
1823
1824	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1825	if (likely(!bc_prealloc(req)))
1826		xprt->ops->free_slot(xprt, req);
1827	else
1828		xprt_free_bc_request(req);
1829}
1830
1831#ifdef CONFIG_SUNRPC_BACKCHANNEL
1832void
1833xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1834{
1835	struct xdr_buf *xbufp = &req->rq_snd_buf;
1836
1837	task->tk_rqstp = req;
1838	req->rq_task = task;
1839	xprt_init_connect_cookie(req, req->rq_xprt);
1840	/*
1841	 * Set up the xdr_buf length.
1842	 * This also indicates that the buffer is XDR encoded already.
1843	 */
1844	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1845		xbufp->tail[0].iov_len;
1846}
1847#endif
1848
1849static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1850{
1851	kref_init(&xprt->kref);
1852
1853	spin_lock_init(&xprt->transport_lock);
1854	spin_lock_init(&xprt->reserve_lock);
1855	spin_lock_init(&xprt->queue_lock);
1856
1857	INIT_LIST_HEAD(&xprt->free);
1858	xprt->recv_queue = RB_ROOT;
1859	INIT_LIST_HEAD(&xprt->xmit_queue);
1860#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1861	spin_lock_init(&xprt->bc_pa_lock);
1862	INIT_LIST_HEAD(&xprt->bc_pa_list);
1863#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1864	INIT_LIST_HEAD(&xprt->xprt_switch);
1865
1866	xprt->last_used = jiffies;
1867	xprt->cwnd = RPC_INITCWND;
1868	xprt->bind_index = 0;
1869
1870	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1871	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1872	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1873	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1874
1875	xprt_init_xid(xprt);
1876
1877	xprt->xprt_net = get_net(net);
1878}
1879
1880/**
1881 * xprt_create_transport - create an RPC transport
1882 * @args: rpc transport creation arguments
1883 *
1884 */
1885struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1886{
1887	struct rpc_xprt	*xprt;
1888	struct xprt_class *t;
1889
1890	spin_lock(&xprt_list_lock);
1891	list_for_each_entry(t, &xprt_list, list) {
1892		if (t->ident == args->ident) {
1893			spin_unlock(&xprt_list_lock);
1894			goto found;
1895		}
1896	}
1897	spin_unlock(&xprt_list_lock);
1898	dprintk("RPC: transport (%d) not supported\n", args->ident);
1899	return ERR_PTR(-EIO);
1900
1901found:
1902	xprt = t->setup(args);
1903	if (IS_ERR(xprt)) {
1904		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
1905				-PTR_ERR(xprt));
1906		goto out;
1907	}
1908	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1909		xprt->idle_timeout = 0;
1910	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1911	if (xprt_has_timer(xprt))
1912		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1913	else
1914		timer_setup(&xprt->timer, NULL, 0);
1915
1916	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1917		xprt_destroy(xprt);
1918		return ERR_PTR(-EINVAL);
1919	}
1920	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1921	if (xprt->servername == NULL) {
1922		xprt_destroy(xprt);
1923		return ERR_PTR(-ENOMEM);
1924	}
1925
1926	rpc_xprt_debugfs_register(xprt);
1927
1928	dprintk("RPC:       created transport %p with %u slots\n", xprt,
1929			xprt->max_reqs);
1930out:
1931	return xprt;
1932}
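
/*
 * Sketch of a caller, assuming a transport class with identifier
 * `ident` is already registered; the server name is a placeholder.
 * Note the ERR_PTR() convention: -EIO means no such transport class,
 * while setup() failures are passed through unchanged.
 */
static struct rpc_xprt *example_create(int ident)
{
	struct xprt_create args = {
		.ident		= ident,
		.servername	= "server.example.net",
	};

	return xprt_create_transport(&args);	/* rpc_xprt or ERR_PTR */
}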
1933
1934static void xprt_destroy_cb(struct work_struct *work)
1935{
1936	struct rpc_xprt *xprt =
1937		container_of(work, struct rpc_xprt, task_cleanup);
1938
1939	rpc_xprt_debugfs_unregister(xprt);
1940	rpc_destroy_wait_queue(&xprt->binding);
1941	rpc_destroy_wait_queue(&xprt->pending);
1942	rpc_destroy_wait_queue(&xprt->sending);
1943	rpc_destroy_wait_queue(&xprt->backlog);
1944	kfree(xprt->servername);
1945	/*
1946	 * Destroy any existing back channel
1947	 */
1948	xprt_destroy_backchannel(xprt, UINT_MAX);
1949
1950	/*
1951	 * Tear down transport state and free the rpc_xprt
1952	 */
1953	xprt->ops->destroy(xprt);
1954}
1955
1956/**
1957 * xprt_destroy - destroy an RPC transport, killing off all requests.
1958 * @xprt: transport to destroy
1959 *
1960 */
1961static void xprt_destroy(struct rpc_xprt *xprt)
1962{
1963	dprintk("RPC:       destroying transport %p\n", xprt);
1964
1965	/*
1966	 * Exclude transport connect/disconnect handlers and autoclose
1967	 */
1968	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1969
1970	del_timer_sync(&xprt->timer);
1971
1972	/*
1973	 * Destroy sockets etc from the system workqueue so they can
1974	 * safely flush receive work running on rpciod.
1975	 */
1976	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
1977	schedule_work(&xprt->task_cleanup);
1978}
1979
1980static void xprt_destroy_kref(struct kref *kref)
1981{
1982	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
1983}
1984
1985/**
1986 * xprt_get - return a reference to an RPC transport.
1987 * @xprt: pointer to the transport
1988 *
1989 */
1990struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
1991{
1992	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
1993		return xprt;
1994	return NULL;
1995}
1996EXPORT_SYMBOL_GPL(xprt_get);
1997
1998/**
1999 * xprt_put - release a reference to an RPC transport.
2000 * @xprt: pointer to the transport
2001 *
2002 */
2003void xprt_put(struct rpc_xprt *xprt)
2004{
2005	if (xprt != NULL)
2006		kref_put(&xprt->kref, xprt_destroy_kref);
2007}
2008EXPORT_SYMBOL_GPL(xprt_put);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/net/sunrpc/xprt.c
   4 *
   5 *  This is a generic RPC call interface supporting congestion avoidance,
   6 *  and asynchronous calls.
   7 *
   8 *  The interface works like this:
   9 *
  10 *  -	When a process places a call, it allocates a request slot if
  11 *	one is available. Otherwise, it sleeps on the backlog queue
  12 *	(xprt_reserve).
  13 *  -	Next, the caller puts together the RPC message, stuffs it into
  14 *	the request struct, and calls xprt_transmit().
  15 *  -	xprt_transmit sends the message and installs the caller on the
  16 *	transport's wait list. At the same time, if a reply is expected,
  17 *	it installs a timer that is run after the packet's timeout has
  18 *	expired.
  19 *  -	When a packet arrives, the data_ready handler walks the list of
  20 *	pending requests for that transport. If a matching XID is found, the
  21 *	caller is woken up, and the timer removed.
  22 *  -	When no reply arrives within the timeout interval, the timer is
  23 *	fired by the kernel and runs xprt_timer(). It either adjusts the
  24 *	timeout values (minor timeout) or wakes up the caller with a status
  25 *	of -ETIMEDOUT.
  26 *  -	When the caller receives a notification from RPC that a reply arrived,
  27 *	it should release the RPC slot, and process the reply.
  28 *	If the call timed out, it may choose to retry the operation by
  29 *	adjusting the initial timeout value, and simply calling rpc_call
  30 *	again.
  31 *
  32 *  Support for async RPC is done through a set of RPC-specific scheduling
  33 *  primitives that `transparently' work for processes as well as async
  34 *  tasks that rely on callbacks.
  35 *
  36 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
  37 *
  38 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
  39 */
  40
  41#include <linux/module.h>
  42
  43#include <linux/types.h>
  44#include <linux/interrupt.h>
  45#include <linux/workqueue.h>
  46#include <linux/net.h>
  47#include <linux/ktime.h>
  48
  49#include <linux/sunrpc/clnt.h>
  50#include <linux/sunrpc/metrics.h>
  51#include <linux/sunrpc/bc_xprt.h>
  52#include <linux/rcupdate.h>
  53#include <linux/sched/mm.h>
  54
  55#include <trace/events/sunrpc.h>
  56
  57#include "sunrpc.h"
  58#include "sysfs.h"
  59#include "fail.h"
  60
  61/*
  62 * Local variables
  63 */
  64
  65#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  66# define RPCDBG_FACILITY	RPCDBG_XPRT
  67#endif
  68
  69/*
  70 * Local functions
  71 */
  72static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
  73static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
  74static void	xprt_destroy(struct rpc_xprt *xprt);
  75static void	xprt_request_init(struct rpc_task *task);
  76static int	xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf);
  77
  78static DEFINE_SPINLOCK(xprt_list_lock);
  79static LIST_HEAD(xprt_list);
  80
  81static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
  82{
  83	unsigned long timeout = jiffies + req->rq_timeout;
  84
  85	if (time_before(timeout, req->rq_majortimeo))
  86		return timeout;
  87	return req->rq_majortimeo;
  88}
  89
  90/**
  91 * xprt_register_transport - register a transport implementation
  92 * @transport: transport to register
  93 *
  94 * If a transport implementation is loaded as a kernel module, it can
  95 * call this interface to make itself known to the RPC client.
  96 *
  97 * Returns:
  98 * 0:		transport successfully registered
  99 * -EEXIST:	transport already registered
 100 * -EINVAL:	transport module being unloaded
 101 */
 102int xprt_register_transport(struct xprt_class *transport)
 103{
 104	struct xprt_class *t;
 105	int result;
 106
 107	result = -EEXIST;
 108	spin_lock(&xprt_list_lock);
 109	list_for_each_entry(t, &xprt_list, list) {
 110		/* don't register the same transport class twice */
 111		if (t->ident == transport->ident)
 112			goto out;
 113	}
 114
 115	list_add_tail(&transport->list, &xprt_list);
 116	printk(KERN_INFO "RPC: Registered %s transport module.\n",
 117	       transport->name);
 118	result = 0;
 119
 120out:
 121	spin_unlock(&xprt_list_lock);
 122	return result;
 123}
 124EXPORT_SYMBOL_GPL(xprt_register_transport);
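
/*
 * A hypothetical transport module registering itself. Only the
 * fields this file dereferences are shown; the ident value is a
 * placeholder, and a real setup() would build and return a live
 * rpc_xprt instead of failing.
 */
static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	return ERR_PTR(-EINVAL);	/* placeholder */
}

static struct xprt_class example_transport = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.ident	= 0x7f,			/* placeholder identifier */
	.setup	= example_setup,
};

static int __init example_module_init(void)
{
	return xprt_register_transport(&example_transport); /* 0 or -EEXIST */
}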
 125
 126/**
 127 * xprt_unregister_transport - unregister a transport implementation
 128 * @transport: transport to unregister
 129 *
 130 * Returns:
 131 * 0:		transport successfully unregistered
 132 * -ENOENT:	transport never registered
 133 */
 134int xprt_unregister_transport(struct xprt_class *transport)
 135{
 136	struct xprt_class *t;
 137	int result;
 138
 139	result = 0;
 140	spin_lock(&xprt_list_lock);
 141	list_for_each_entry(t, &xprt_list, list) {
 142		if (t == transport) {
 143			printk(KERN_INFO
 144				"RPC: Unregistered %s transport module.\n",
 145				transport->name);
 146			list_del_init(&transport->list);
 147			goto out;
 148		}
 149	}
 150	result = -ENOENT;
 151
 152out:
 153	spin_unlock(&xprt_list_lock);
 154	return result;
 155}
 156EXPORT_SYMBOL_GPL(xprt_unregister_transport);
 157
 158static void
 159xprt_class_release(const struct xprt_class *t)
 160{
 161	module_put(t->owner);
 162}
 163
 164static const struct xprt_class *
 165xprt_class_find_by_ident_locked(int ident)
 166{
 167	const struct xprt_class *t;
 168
 169	list_for_each_entry(t, &xprt_list, list) {
 170		if (t->ident != ident)
 171			continue;
 172		if (!try_module_get(t->owner))
 173			continue;
 174		return t;
 175	}
 176	return NULL;
 177}
 178
 179static const struct xprt_class *
 180xprt_class_find_by_ident(int ident)
 181{
 182	const struct xprt_class *t;
 183
 184	spin_lock(&xprt_list_lock);
 185	t = xprt_class_find_by_ident_locked(ident);
 186	spin_unlock(&xprt_list_lock);
 187	return t;
 188}
 189
 190static const struct xprt_class *
 191xprt_class_find_by_netid_locked(const char *netid)
 192{
 193	const struct xprt_class *t;
 194	unsigned int i;
 195
 196	list_for_each_entry(t, &xprt_list, list) {
 197		for (i = 0; t->netid[i][0] != '\0'; i++) {
 198			if (strcmp(t->netid[i], netid) != 0)
 199				continue;
 200			if (!try_module_get(t->owner))
 201				continue;
 202			return t;
 203		}
 204	}
 205	return NULL;
 206}
 207
 208static const struct xprt_class *
 209xprt_class_find_by_netid(const char *netid)
 210{
 211	const struct xprt_class *t;
 212
 213	spin_lock(&xprt_list_lock);
 214	t = xprt_class_find_by_netid_locked(netid);
 215	if (!t) {
 216		spin_unlock(&xprt_list_lock);
 217		request_module("rpc%s", netid);
 218		spin_lock(&xprt_list_lock);
 219		t = xprt_class_find_by_netid_locked(netid);
 220	}
 221	spin_unlock(&xprt_list_lock);
 222	return t;
 223}
 224
 225/**
 226 * xprt_find_transport_ident - convert a netid into a transport identifier
 227 * @netid: transport to load
 228 *
 229 * Returns:
 230 * > 0:		transport identifier
 231 * -ENOENT:	transport module not available
 232 */
 233int xprt_find_transport_ident(const char *netid)
 234{
 235	const struct xprt_class *t;
 236	int ret;
 237
 238	t = xprt_class_find_by_netid(netid);
 239	if (!t)
 240		return -ENOENT;
 241	ret = t->ident;
 242	xprt_class_release(t);
 243	return ret;
 244}
 245EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
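
/*
 * For example, xprt_find_transport_ident("tcp") returns the ->ident
 * of whichever registered class lists "tcp" among its netids, after
 * at most one request_module("rpctcp") attempt; -ENOENT means no
 * matching module could be loaded.
 */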
 246
 247static void xprt_clear_locked(struct rpc_xprt *xprt)
 248{
 249	xprt->snd_task = NULL;
 250	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
 251		clear_bit_unlock(XPRT_LOCKED, &xprt->state);
 252	else
 253		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 254}
 255
 256/**
 257 * xprt_reserve_xprt - serialize write access to transports
 258 * @task: task that is requesting access to the transport
 259 * @xprt: pointer to the target transport
 260 *
 261 * This prevents mixing the payload of separate requests, and prevents
 262 * transport connects from colliding with writes.  No congestion control
 263 * is provided.
 264 */
 265int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 266{
 267	struct rpc_rqst *req = task->tk_rqstp;
 268
 269	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 270		if (task == xprt->snd_task)
 271			goto out_locked;
 272		goto out_sleep;
 273	}
 274	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 275		goto out_unlock;
 276	xprt->snd_task = task;
 277
 278out_locked:
 279	trace_xprt_reserve_xprt(xprt, task);
 280	return 1;
 281
 282out_unlock:
 283	xprt_clear_locked(xprt);
 284out_sleep:
 285	task->tk_status = -EAGAIN;
 286	if (RPC_IS_SOFT(task))
 287		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
 288				xprt_request_timeout(req));
 289	else
 290		rpc_sleep_on(&xprt->sending, task, NULL);
 291	return 0;
 292}
 293EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
 294
 295static bool
 296xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
 297{
 298	return test_bit(XPRT_CWND_WAIT, &xprt->state);
 299}
 300
 301static void
 302xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
 303{
 304	if (!list_empty(&xprt->xmit_queue)) {
 305		/* Peek at head of queue to see if it can make progress */
 306		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
 307					rq_xmit)->rq_cong)
 308			return;
 309	}
 310	set_bit(XPRT_CWND_WAIT, &xprt->state);
 311}
 312
 313static void
 314xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
 315{
 316	if (!RPCXPRT_CONGESTED(xprt))
 317		clear_bit(XPRT_CWND_WAIT, &xprt->state);
 318}
 319
 320/*
 321 * xprt_reserve_xprt_cong - serialize write access to transports
 322 * @task: task that is requesting access to the transport
 323 *
 324 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 325 * integrated into the decision of whether a request is allowed to be
 326 * woken up and given access to the transport.
 327 * Note that the lock is only granted if we know there are free slots.
 328 */
 329int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 330{
 331	struct rpc_rqst *req = task->tk_rqstp;
 332
 333	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 334		if (task == xprt->snd_task)
 335			goto out_locked;
 336		goto out_sleep;
 337	}
 338	if (req == NULL) {
 339		xprt->snd_task = task;
 340		goto out_locked;
 341	}
 342	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 343		goto out_unlock;
 344	if (!xprt_need_congestion_window_wait(xprt)) {
 345		xprt->snd_task = task;
 346		goto out_locked;
 347	}
 348out_unlock:
 349	xprt_clear_locked(xprt);
 350out_sleep:
 351	task->tk_status = -EAGAIN;
 352	if (RPC_IS_SOFT(task))
 353		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
 354				xprt_request_timeout(req));
 355	else
 356		rpc_sleep_on(&xprt->sending, task, NULL);
 357	return 0;
 358out_locked:
 359	trace_xprt_reserve_cong(xprt, task);
 360	return 1;
 361}
 362EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
 363
 364static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 365{
 366	int retval;
 367
 368	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
 369		return 1;
 370	spin_lock(&xprt->transport_lock);
 371	retval = xprt->ops->reserve_xprt(xprt, task);
 372	spin_unlock(&xprt->transport_lock);
 373	return retval;
 374}
 375
 376static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 377{
 378	struct rpc_xprt *xprt = data;
 379
 380	xprt->snd_task = task;
 381	return true;
 382}
 383
 384static void __xprt_lock_write_next(struct rpc_xprt *xprt)
 385{
 386	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 387		return;
 388	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 389		goto out_unlock;
 390	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
 391				__xprt_lock_write_func, xprt))
 392		return;
 393out_unlock:
 394	xprt_clear_locked(xprt);
 395}
 396
 397static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
 398{
 399	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 400		return;
 401	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
 402		goto out_unlock;
 403	if (xprt_need_congestion_window_wait(xprt))
 404		goto out_unlock;
 405	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
 406				__xprt_lock_write_func, xprt))
 407		return;
 408out_unlock:
 409	xprt_clear_locked(xprt);
 410}
 411
 412/**
 413 * xprt_release_xprt - allow other requests to use a transport
 414 * @xprt: transport with other tasks potentially waiting
 415 * @task: task that is releasing access to the transport
 416 *
 417 * Note that "task" can be NULL.  No congestion control is provided.
 418 */
 419void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 420{
 421	if (xprt->snd_task == task) {
 422		xprt_clear_locked(xprt);
 423		__xprt_lock_write_next(xprt);
 424	}
 425	trace_xprt_release_xprt(xprt, task);
 426}
 427EXPORT_SYMBOL_GPL(xprt_release_xprt);
 428
 429/**
 430 * xprt_release_xprt_cong - allow other requests to use a transport
 431 * @xprt: transport with other tasks potentially waiting
 432 * @task: task that is releasing access to the transport
 433 *
 434 * Note that "task" can be NULL.  Another task is awoken to use the
 435 * transport if the transport's congestion window allows it.
 436 */
 437void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 438{
 439	if (xprt->snd_task == task) {
 440		xprt_clear_locked(xprt);
 441		__xprt_lock_write_next_cong(xprt);
 442	}
 443	trace_xprt_release_cong(xprt, task);
 444}
 445EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
 446
 447void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 448{
 449	if (xprt->snd_task != task)
 450		return;
 451	spin_lock(&xprt->transport_lock);
 452	xprt->ops->release_xprt(xprt, task);
 453	spin_unlock(&xprt->transport_lock);
 454}
 455
 456/*
 457 * Van Jacobson congestion avoidance. Check if the congestion window
 458 * overflowed. Put the task to sleep if this is the case.
 459 */
 460static int
 461__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 462{
 463	if (req->rq_cong)
 464		return 1;
 465	trace_xprt_get_cong(xprt, req->rq_task);
 466	if (RPCXPRT_CONGESTED(xprt)) {
 467		xprt_set_congestion_window_wait(xprt);
 468		return 0;
 469	}
 470	req->rq_cong = 1;
 471	xprt->cong += RPC_CWNDSCALE;
 472	return 1;
 473}
 474
 475/*
 476 * Adjust the congestion window, and wake up the next task
 477 * that has been sleeping due to congestion
 478 */
 479static void
 480__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 481{
 482	if (!req->rq_cong)
 483		return;
 484	req->rq_cong = 0;
 485	xprt->cong -= RPC_CWNDSCALE;
 486	xprt_test_and_clear_congestion_window_wait(xprt);
 487	trace_xprt_put_cong(xprt, req->rq_task);
 488	__xprt_lock_write_next_cong(xprt);
 489}
 490
 491/**
 492 * xprt_request_get_cong - Request congestion control credits
 493 * @xprt: pointer to transport
 494 * @req: pointer to RPC request
 495 *
 496 * Useful for transports that require congestion control.
 497 */
 498bool
 499xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 500{
 501	bool ret = false;
 502
 503	if (req->rq_cong)
 504		return true;
 505	spin_lock(&xprt->transport_lock);
 506	ret = __xprt_get_cong(xprt, req) != 0;
 507	spin_unlock(&xprt->transport_lock);
 508	return ret;
 509}
 510EXPORT_SYMBOL_GPL(xprt_request_get_cong);
 511
 512/**
 513 * xprt_release_rqst_cong - housekeeping when request is complete
 514 * @task: RPC request that recently completed
 515 *
 516 * Useful for transports that require congestion control.
 517 */
 518void xprt_release_rqst_cong(struct rpc_task *task)
 519{
 520	struct rpc_rqst *req = task->tk_rqstp;
 521
 522	__xprt_put_cong(req->rq_xprt, req);
 523}
 524EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
 525
 526static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
 527{
 528	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
 529		__xprt_lock_write_next_cong(xprt);
 530}
 531
 532/*
 533 * Clear the congestion window wait flag and wake up the next
 534 * entry on xprt->sending
 535 */
 536static void
 537xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
 538{
 539	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
 540		spin_lock(&xprt->transport_lock);
 541		__xprt_lock_write_next_cong(xprt);
 542		spin_unlock(&xprt->transport_lock);
 543	}
 544}
 545
 546/**
 547 * xprt_adjust_cwnd - adjust transport congestion window
 548 * @xprt: pointer to xprt
 549 * @task: recently completed RPC request used to adjust window
 550 * @result: result code of completed RPC request
 551 *
 552 * The transport code maintains an estimate on the maximum number of out-
 553 * standing RPC requests, using a smoothed version of the congestion
 554 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 555 * congestion algorithm: If a retransmit occurs, the congestion window is
 556 * halved; otherwise, it is incremented by 1/cwnd when
 557 *
 558 *	-	a reply is received and
 559 *	-	a full number of requests are outstanding and
 560 *	-	the congestion window hasn't been updated recently.
 561 */
 562void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
 563{
 564	struct rpc_rqst *req = task->tk_rqstp;
 565	unsigned long cwnd = xprt->cwnd;
 566
 567	if (result >= 0 && cwnd <= xprt->cong) {
 568		/* The (cwnd >> 1) term makes sure
 569		 * the result gets rounded properly. */
 570		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
 571		if (cwnd > RPC_MAXCWND(xprt))
 572			cwnd = RPC_MAXCWND(xprt);
 573		__xprt_lock_write_next_cong(xprt);
 574	} else if (result == -ETIMEDOUT) {
 575		cwnd >>= 1;
 576		if (cwnd < RPC_CWNDSCALE)
 577			cwnd = RPC_CWNDSCALE;
 578	}
 579	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
 580			xprt->cong, xprt->cwnd, cwnd);
 581	xprt->cwnd = cwnd;
 582	__xprt_put_cong(xprt, req);
 583}
 584EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
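
/*
 * Worked example of the fixed-point arithmetic above, with the window
 * kept in units of RPC_CWNDSCALE: if cwnd == 2 * RPC_CWNDSCALE (two
 * requests may be outstanding) and a reply arrives while the window
 * is full, then
 *
 *	cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + cwnd/2) / cwnd
 *	     ~= RPC_CWNDSCALE / 2
 *
 * i.e. the window grows by half a request slot -- the classic 1/cwnd
 * additive increase. An -ETIMEDOUT result instead halves cwnd,
 * clamped below at RPC_CWNDSCALE (a single request).
 */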
 585
 586/**
 587 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 588 * @xprt: transport with waiting tasks
 589 * @status: result code to plant in each task before waking it
 590 *
 591 */
 592void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
 593{
 594	if (status < 0)
 595		rpc_wake_up_status(&xprt->pending, status);
 596	else
 597		rpc_wake_up(&xprt->pending);
 598}
 599EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
 600
 601/**
 602 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 603 * @xprt: transport
 604 *
 605 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 606 * we don't in general want to force a socket disconnection due to
 607 * an incomplete RPC call transmission.
 608 */
 609void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
 610{
 611	set_bit(XPRT_WRITE_SPACE, &xprt->state);
 612}
 613EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
 614
 615static bool
 616xprt_clear_write_space_locked(struct rpc_xprt *xprt)
 617{
 618	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
 619		__xprt_lock_write_next(xprt);
 620		dprintk("RPC:       write space: waking waiting task on "
 621				"xprt %p\n", xprt);
 622		return true;
 623	}
 624	return false;
 625}
 626
 627/**
 628 * xprt_write_space - wake the task waiting for transport output buffer space
 629 * @xprt: transport with waiting tasks
 630 *
 631 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 632 */
 633bool xprt_write_space(struct rpc_xprt *xprt)
 634{
 635	bool ret;
 636
 637	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
 638		return false;
 639	spin_lock(&xprt->transport_lock);
 640	ret = xprt_clear_write_space_locked(xprt);
 641	spin_unlock(&xprt->transport_lock);
 642	return ret;
 643}
 644EXPORT_SYMBOL_GPL(xprt_write_space);
 645
 646static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
 647{
 648	s64 delta = ktime_to_ns(ktime_get() - abstime);
 649	return likely(delta >= 0) ?
 650		jiffies - nsecs_to_jiffies(delta) :
 651		jiffies + nsecs_to_jiffies(-delta);
 652}
 653
 654static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
 655{
 656	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
 657	unsigned long majortimeo = req->rq_timeout;
 658
 659	if (to->to_exponential)
 660		majortimeo <<= to->to_retries;
 661	else
 662		majortimeo += to->to_increment * to->to_retries;
 663	if (majortimeo > to->to_maxval || majortimeo == 0)
 664		majortimeo = to->to_maxval;
 665	return majortimeo;
 666}
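
/*
 * Worked example (values hypothetical): with to_initval = 5s,
 * to_retries = 3 and to_exponential set, majortimeo = 5s << 3 = 40s;
 * with linear backoff and to_increment = 5s it would be
 * 5s + 5s * 3 = 20s instead. Either result is clamped to to_maxval.
 */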
 667
 668static void xprt_reset_majortimeo(struct rpc_rqst *req)
 669{
 670	req->rq_majortimeo += xprt_calc_majortimeo(req);
 671}
 672
 673static void xprt_reset_minortimeo(struct rpc_rqst *req)
 674{
 675	req->rq_minortimeo += req->rq_timeout;
 676}
 677
 678static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
 679{
 680	unsigned long time_init;
 681	struct rpc_xprt *xprt = req->rq_xprt;
 682
 683	if (likely(xprt && xprt_connected(xprt)))
 684		time_init = jiffies;
 685	else
 686		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
 687	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
 688	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
 689	req->rq_minortimeo = time_init + req->rq_timeout;
 690}
 691
 692/**
 693 * xprt_adjust_timeout - adjust timeout values for next retransmit
 694 * @req: RPC request containing parameters to use for the adjustment
 695 *
 696 */
 697int xprt_adjust_timeout(struct rpc_rqst *req)
 698{
 699	struct rpc_xprt *xprt = req->rq_xprt;
 700	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
 701	int status = 0;
 702
 703	if (time_before(jiffies, req->rq_majortimeo)) {
 704		if (time_before(jiffies, req->rq_minortimeo))
 705			return status;
 706		if (to->to_exponential)
 707			req->rq_timeout <<= 1;
 708		else
 709			req->rq_timeout += to->to_increment;
 710		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
 711			req->rq_timeout = to->to_maxval;
 712		req->rq_retries++;
 713	} else {
 714		req->rq_timeout = to->to_initval;
 715		req->rq_retries = 0;
 716		xprt_reset_majortimeo(req);
 717		/* Reset the RTT counters == "slow start" */
 718		spin_lock(&xprt->transport_lock);
 719		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
 720		spin_unlock(&xprt->transport_lock);
 721		status = -ETIMEDOUT;
 722	}
 723	xprt_reset_minortimeo(req);
 724
 725	if (req->rq_timeout == 0) {
 726		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
 727		req->rq_timeout = 5 * HZ;
 728	}
 729	return status;
 730}
 731
 732static void xprt_autoclose(struct work_struct *work)
 733{
 734	struct rpc_xprt *xprt =
 735		container_of(work, struct rpc_xprt, task_cleanup);
 736	unsigned int pflags = memalloc_nofs_save();
 737
 738	trace_xprt_disconnect_auto(xprt);
 739	xprt->connect_cookie++;
 740	smp_mb__before_atomic();
 741	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
 742	xprt->ops->close(xprt);
 743	xprt_release_write(xprt, NULL);
 744	wake_up_bit(&xprt->state, XPRT_LOCKED);
 745	memalloc_nofs_restore(pflags);
 746}
 747
 748/**
 749 * xprt_disconnect_done - mark a transport as disconnected
 750 * @xprt: transport to flag for disconnect
 751 *
 752 */
 753void xprt_disconnect_done(struct rpc_xprt *xprt)
 754{
 755	trace_xprt_disconnect_done(xprt);
 756	spin_lock(&xprt->transport_lock);
 757	xprt_clear_connected(xprt);
 758	xprt_clear_write_space_locked(xprt);
 759	xprt_clear_congestion_window_wait_locked(xprt);
 760	xprt_wake_pending_tasks(xprt, -ENOTCONN);
 761	spin_unlock(&xprt->transport_lock);
 762}
 763EXPORT_SYMBOL_GPL(xprt_disconnect_done);
 764
 765/**
 766 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 767 * @xprt: transport to disconnect
 768 */
 769static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
 770{
 771	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
 772		return;
 773	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
 774		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 775	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
 776		rpc_wake_up_queued_task_set_status(&xprt->pending,
 777						   xprt->snd_task, -ENOTCONN);
 778}
 779
 780/**
 781 * xprt_force_disconnect - force a transport to disconnect
 782 * @xprt: transport to disconnect
 783 *
 784 */
 785void xprt_force_disconnect(struct rpc_xprt *xprt)
 786{
 787	trace_xprt_disconnect_force(xprt);
 788
 789	/* Don't race with the test_bit() in xprt_clear_locked() */
 790	spin_lock(&xprt->transport_lock);
 791	xprt_schedule_autoclose_locked(xprt);
 792	spin_unlock(&xprt->transport_lock);
 793}
 794EXPORT_SYMBOL_GPL(xprt_force_disconnect);
 795
 796static unsigned int
 797xprt_connect_cookie(struct rpc_xprt *xprt)
 798{
 799	return READ_ONCE(xprt->connect_cookie);
 800}
 801
 802static bool
 803xprt_request_retransmit_after_disconnect(struct rpc_task *task)
 804{
 805	struct rpc_rqst *req = task->tk_rqstp;
 806	struct rpc_xprt *xprt = req->rq_xprt;
 807
 808	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
 809		!xprt_connected(xprt);
 810}
 811
 812/**
 813 * xprt_conditional_disconnect - force a transport to disconnect
 814 * @xprt: transport to disconnect
 815 * @cookie: 'connection cookie'
 816 *
 817 * This attempts to break the connection if and only if 'cookie' matches
 818 * the current transport 'connection cookie'. It ensures that we don't
 819 * try to break the connection more than once when we need to retransmit
 820 * a batch of RPC requests.
 821 *
 822 */
 823void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
 824{
 825	/* Don't race with the test_bit() in xprt_clear_locked() */
 826	spin_lock(&xprt->transport_lock);
 827	if (cookie != xprt->connect_cookie)
 828		goto out;
 829	if (test_bit(XPRT_CLOSING, &xprt->state))
 830		goto out;
 831	xprt_schedule_autoclose_locked(xprt);
 832out:
 833	spin_unlock(&xprt->transport_lock);
 834}
 835
 836static bool
 837xprt_has_timer(const struct rpc_xprt *xprt)
 838{
 839	return xprt->idle_timeout != 0;
 840}
 841
 842static void
 843xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
 844	__must_hold(&xprt->transport_lock)
 845{
 846	xprt->last_used = jiffies;
 847	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
 848		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
 849}
 850
 851static void
 852xprt_init_autodisconnect(struct timer_list *t)
 853{
 854	struct rpc_xprt *xprt = from_timer(xprt, t, timer);
 855
 856	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
 857		return;
 858	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
 859	xprt->last_used = jiffies;
 860	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 861		return;
 862	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 863}
 864
 865#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
 866static void xprt_inject_disconnect(struct rpc_xprt *xprt)
 867{
 868	if (!fail_sunrpc.ignore_client_disconnect &&
 869	    should_fail(&fail_sunrpc.attr, 1))
 870		xprt->ops->inject_disconnect(xprt);
 871}
 872#else
 873static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
 874{
 875}
 876#endif
 877
 878bool xprt_lock_connect(struct rpc_xprt *xprt,
 879		struct rpc_task *task,
 880		void *cookie)
 881{
 882	bool ret = false;
 883
 884	spin_lock(&xprt->transport_lock);
 885	if (!test_bit(XPRT_LOCKED, &xprt->state))
 886		goto out;
 887	if (xprt->snd_task != task)
 888		goto out;
 889	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
 890	xprt->snd_task = cookie;
 891	ret = true;
 892out:
 893	spin_unlock(&xprt->transport_lock);
 894	return ret;
 895}
 896EXPORT_SYMBOL_GPL(xprt_lock_connect);
 897
 898void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
 899{
 900	spin_lock(&xprt->transport_lock);
 901	if (xprt->snd_task != cookie)
 902		goto out;
 903	if (!test_bit(XPRT_LOCKED, &xprt->state))
 904		goto out;
 905	xprt->snd_task = NULL;
 906	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
 907	xprt->ops->release_xprt(xprt, NULL);
 908	xprt_schedule_autodisconnect(xprt);
 909out:
 910	spin_unlock(&xprt->transport_lock);
 911	wake_up_bit(&xprt->state, XPRT_LOCKED);
 912}
 913EXPORT_SYMBOL_GPL(xprt_unlock_connect);
 914
 915/**
 916 * xprt_connect - schedule a transport connect operation
 917 * @task: RPC task that is requesting the connect
 918 *
 919 */
 920void xprt_connect(struct rpc_task *task)
 921{
 922	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
 923
 924	trace_xprt_connect(xprt);
 925
 926	if (!xprt_bound(xprt)) {
 927		task->tk_status = -EAGAIN;
 928		return;
 929	}
 930	if (!xprt_lock_write(xprt, task))
 931		return;
 932
 933	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
 934		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
 935		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
 936				xprt_request_timeout(task->tk_rqstp));
 937
 938		if (test_bit(XPRT_CLOSING, &xprt->state))
 939			return;
 940		if (xprt_test_and_set_connecting(xprt))
 941			return;
 942		/* Race breaker */
 943		if (!xprt_connected(xprt)) {
 944			xprt->stat.connect_start = jiffies;
 945			xprt->ops->connect(xprt, task);
 946		} else {
 947			xprt_clear_connecting(xprt);
 948			task->tk_status = 0;
 949			rpc_wake_up_queued_task(&xprt->pending, task);
 950		}
 951	}
 952	xprt_release_write(xprt, task);
 953}
 954
 955/**
 956 * xprt_reconnect_delay - compute the wait before scheduling a connect
 957 * @xprt: transport instance
 958 *
 959 */
 960unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
 961{
 962	unsigned long start, now = jiffies;
 963
 964	start = xprt->stat.connect_start + xprt->reestablish_timeout;
 965	if (time_after(start, now))
 966		return start - now;
 967	return 0;
 968}
 969EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
 970
 971/**
 972 * xprt_reconnect_backoff - compute the new re-establish timeout
 973 * @xprt: transport instance
 974 * @init_to: initial reestablish timeout
 975 *
 976 */
 977void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
 978{
 979	xprt->reestablish_timeout <<= 1;
 980	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
 981		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
 982	if (xprt->reestablish_timeout < init_to)
 983		xprt->reestablish_timeout = init_to;
 984}
 985EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
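
/*
 * Sketch of the intended pairing, roughly as a transport's connect
 * path might use it (the worker-scheduling step is elided): sleep
 * for the current delay, then widen the window for the next failure.
 */
static unsigned long example_connect_backoff(struct rpc_xprt *xprt,
					     unsigned long init_to)
{
	unsigned long delay = xprt_reconnect_delay(xprt);

	xprt_reconnect_backoff(xprt, init_to);	/* double, clamp to max */
	return delay;		/* jiffies to wait before reconnecting */
}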
 986
 987enum xprt_xid_rb_cmp {
 988	XID_RB_EQUAL,
 989	XID_RB_LEFT,
 990	XID_RB_RIGHT,
 991};
 992static enum xprt_xid_rb_cmp
 993xprt_xid_cmp(__be32 xid1, __be32 xid2)
 994{
 995	if (xid1 == xid2)
 996		return XID_RB_EQUAL;
 997	if ((__force u32)xid1 < (__force u32)xid2)
 998		return XID_RB_LEFT;
 999	return XID_RB_RIGHT;
1000}
1001
1002static struct rpc_rqst *
1003xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
1004{
1005	struct rb_node *n = xprt->recv_queue.rb_node;
1006	struct rpc_rqst *req;
1007
1008	while (n != NULL) {
1009		req = rb_entry(n, struct rpc_rqst, rq_recv);
1010		switch (xprt_xid_cmp(xid, req->rq_xid)) {
1011		case XID_RB_LEFT:
1012			n = n->rb_left;
1013			break;
1014		case XID_RB_RIGHT:
1015			n = n->rb_right;
1016			break;
1017		case XID_RB_EQUAL:
1018			return req;
1019		}
1020	}
1021	return NULL;
1022}
1023
1024static void
1025xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
1026{
1027	struct rb_node **p = &xprt->recv_queue.rb_node;
1028	struct rb_node *n = NULL;
1029	struct rpc_rqst *req;
1030
1031	while (*p != NULL) {
1032		n = *p;
1033		req = rb_entry(n, struct rpc_rqst, rq_recv);
1034		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
1035		case XID_RB_LEFT:
1036			p = &n->rb_left;
1037			break;
1038		case XID_RB_RIGHT:
1039			p = &n->rb_right;
1040			break;
1041		case XID_RB_EQUAL:
1042			WARN_ON_ONCE(new != req);
1043			return;
1044		}
1045	}
1046	rb_link_node(&new->rq_recv, n, p);
1047	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1048}
1049
1050static void
1051xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1052{
1053	rb_erase(&req->rq_recv, &xprt->recv_queue);
1054}
1055
1056/**
1057 * xprt_lookup_rqst - find an RPC request corresponding to an XID
1058 * @xprt: transport on which the original request was transmitted
1059 * @xid: RPC XID of incoming reply
1060 *
1061 * Caller holds xprt->queue_lock.
1062 */
1063struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1064{
1065	struct rpc_rqst *entry;
1066
1067	entry = xprt_request_rb_find(xprt, xid);
1068	if (entry != NULL) {
1069		trace_xprt_lookup_rqst(xprt, xid, 0);
1070		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
1071		return entry;
1072	}
1073
1074	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
1075			ntohl(xid));
1076	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1077	xprt->stat.bad_xids++;
1078	return NULL;
1079}
1080EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
1081
1082static bool
1083xprt_is_pinned_rqst(struct rpc_rqst *req)
1084{
1085	return atomic_read(&req->rq_pin) != 0;
1086}
1087
1088/**
1089 * xprt_pin_rqst - Pin a request on the transport receive list
1090 * @req: Request to pin
1091 *
1092 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
1093 * so should be holding xprt->queue_lock.
1094 */
1095void xprt_pin_rqst(struct rpc_rqst *req)
1096{
1097	atomic_inc(&req->rq_pin);
1098}
1099EXPORT_SYMBOL_GPL(xprt_pin_rqst);
1100
1101/**
1102 * xprt_unpin_rqst - Unpin a request on the transport receive list
1103 * @req: Request to unpin
1104 *
1105 * Caller should be holding xprt->queue_lock.
1106 */
1107void xprt_unpin_rqst(struct rpc_rqst *req)
1108{
1109	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
1110		atomic_dec(&req->rq_pin);
1111		return;
1112	}
1113	if (atomic_dec_and_test(&req->rq_pin))
1114		wake_up_var(&req->rq_pin);
1115}
1116EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
1117
1118static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
1119{
1120	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
1121}
1122
1123static bool
1124xprt_request_data_received(struct rpc_task *task)
1125{
1126	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1127		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
1128}
1129
1130static bool
1131xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
1132{
1133	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1134		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
1135}
1136
1137/**
1138 * xprt_request_enqueue_receive - Add a request to the receive queue
1139 * @task: RPC task
1140 *
1141 */
1142int
1143xprt_request_enqueue_receive(struct rpc_task *task)
1144{
1145	struct rpc_rqst *req = task->tk_rqstp;
1146	struct rpc_xprt *xprt = req->rq_xprt;
1147	int ret;
1148
1149	if (!xprt_request_need_enqueue_receive(task, req))
1150		return 0;
1151
1152	ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
1153	if (ret)
1154		return ret;
1155	spin_lock(&xprt->queue_lock);
1156
1157	/* Update the softirq receive buffer */
1158	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1159			sizeof(req->rq_private_buf));
1160
1161	/* Add request to the receive list */
1162	xprt_request_rb_insert(xprt, req);
1163	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1164	spin_unlock(&xprt->queue_lock);
1165
1166	/* Turn off autodisconnect */
1167	del_timer_sync(&xprt->timer);
1168	return 0;
1169}
1170
1171/**
1172 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1173 * @task: RPC task
1174 *
1175 * Caller must hold xprt->queue_lock.
1176 */
1177static void
1178xprt_request_dequeue_receive_locked(struct rpc_task *task)
1179{
1180	struct rpc_rqst *req = task->tk_rqstp;
1181
1182	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1183		xprt_request_rb_remove(req->rq_xprt, req);
1184}
1185
1186/**
1187 * xprt_update_rtt - Update RPC RTT statistics
1188 * @task: RPC request that recently completed
1189 *
1190 * Caller holds xprt->queue_lock.
1191 */
1192void xprt_update_rtt(struct rpc_task *task)
1193{
1194	struct rpc_rqst *req = task->tk_rqstp;
1195	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1196	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1197	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1198
1199	if (timer) {
1200		if (req->rq_ntrans == 1)
1201			rpc_update_rtt(rtt, timer, m);
1202		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1203	}
1204}
1205EXPORT_SYMBOL_GPL(xprt_update_rtt);
1206
1207/**
1208 * xprt_complete_rqst - called when reply processing is complete
1209 * @task: RPC request that recently completed
1210 * @copied: actual number of bytes received from the transport
1211 *
1212 * Caller holds xprt->queue_lock.
1213 */
1214void xprt_complete_rqst(struct rpc_task *task, int copied)
1215{
1216	struct rpc_rqst *req = task->tk_rqstp;
1217	struct rpc_xprt *xprt = req->rq_xprt;
1218
1219	xprt->stat.recvs++;
1220
1221	xdr_free_bvec(&req->rq_rcv_buf);
1222	req->rq_private_buf.bvec = NULL;
1223	req->rq_private_buf.len = copied;
1224	/* Ensure all writes are done before we update */
1225	/* req->rq_reply_bytes_recvd */
1226	smp_wmb();
1227	req->rq_reply_bytes_recvd = copied;
1228	xprt_request_dequeue_receive_locked(task);
1229	rpc_wake_up_queued_task(&xprt->pending, task);
1230}
1231EXPORT_SYMBOL_GPL(xprt_complete_rqst);
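
/*
 * The locking/pinning pattern the comments above assume, roughly as
 * a transport's reply handler would use it; the xid argument and the
 * elided data-copy step are placeholders.
 */
static void example_handle_reply(struct rpc_xprt *xprt, __be32 xid, int copied)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req) {
		spin_unlock(&xprt->queue_lock);
		return;		/* no matching request; reply dropped */
	}
	xprt_pin_rqst(req);	/* keeps req alive once the lock drops */
	spin_unlock(&xprt->queue_lock);

	/* ... copy the reply data into req->rq_private_buf here ... */

	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->queue_lock);
}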
1232
1233static void xprt_timer(struct rpc_task *task)
1234{
1235	struct rpc_rqst *req = task->tk_rqstp;
1236	struct rpc_xprt *xprt = req->rq_xprt;
1237
1238	if (task->tk_status != -ETIMEDOUT)
1239		return;
1240
1241	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1242	if (!req->rq_reply_bytes_recvd) {
1243		if (xprt->ops->timer)
1244			xprt->ops->timer(xprt, task);
1245	} else
1246		task->tk_status = 0;
1247}
1248
1249/**
1250 * xprt_wait_for_reply_request_def - wait for reply
1251 * @task: pointer to rpc_task
1252 *
1253 * Set a request's retransmit timeout based on the transport's
1254 * default timeout parameters.  Used by transports that don't adjust
1255 * the retransmit timeout based on round-trip time estimation,
1256 * and put the task to sleep on the pending queue.
1257 */
1258void xprt_wait_for_reply_request_def(struct rpc_task *task)
1259{
1260	struct rpc_rqst *req = task->tk_rqstp;
1261
1262	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1263			xprt_request_timeout(req));
1264}
1265EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1266
1267/**
1268 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1269 * @task: pointer to rpc_task
1270 *
1271 * Set a request's retransmit timeout using the RTT estimator,
1272 * and put the task to sleep on the pending queue.
1273 */
1274void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1275{
1276	int timer = task->tk_msg.rpc_proc->p_timer;
1277	struct rpc_clnt *clnt = task->tk_client;
1278	struct rpc_rtt *rtt = clnt->cl_rtt;
1279	struct rpc_rqst *req = task->tk_rqstp;
1280	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1281	unsigned long timeout;
1282
1283	timeout = rpc_calc_rto(rtt, timer);
1284	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1285	if (timeout > max_timeout || timeout == 0)
1286		timeout = max_timeout;
1287	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1288			jiffies + timeout);
1289}
1290EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
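
/*
 * Example (values hypothetical): if rpc_calc_rto() estimates a 2s
 * round-trip and rpc_ntimeo() plus rq_retries sum to 2, the task
 * sleeps for 2s << 2 = 8s, clamped to the client's to_maxval.
 */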
1291
1292/**
1293 * xprt_request_wait_receive - wait for the reply to an RPC request
1294 * @task: RPC task about to send a request
1295 *
1296 */
1297void xprt_request_wait_receive(struct rpc_task *task)
1298{
1299	struct rpc_rqst *req = task->tk_rqstp;
1300	struct rpc_xprt *xprt = req->rq_xprt;
1301
1302	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1303		return;
1304	/*
1305	 * Sleep on the pending queue if we're expecting a reply.
1306	 * The spinlock ensures atomicity between the test of
1307	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1308	 */
1309	spin_lock(&xprt->queue_lock);
1310	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1311		xprt->ops->wait_for_reply_request(task);
1312		/*
1313		 * Send an extra queue wakeup call if the
1314		 * connection was dropped in case the call to
1315		 * rpc_sleep_on() raced.
1316		 */
1317		if (xprt_request_retransmit_after_disconnect(task))
1318			rpc_wake_up_queued_task_set_status(&xprt->pending,
1319					task, -ENOTCONN);
1320	}
1321	spin_unlock(&xprt->queue_lock);
1322}
1323
1324static bool
1325xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1326{
1327	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1328}
1329
1330/**
1331 * xprt_request_enqueue_transmit - queue a task for transmission
1332 * @task: pointer to rpc_task
1333 *
1334 * Add a task to the transmission queue.
1335 */
1336void
1337xprt_request_enqueue_transmit(struct rpc_task *task)
1338{
1339	struct rpc_rqst *pos, *req = task->tk_rqstp;
1340	struct rpc_xprt *xprt = req->rq_xprt;
1341	int ret;
1342
1343	if (xprt_request_need_enqueue_transmit(task, req)) {
1344		ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
1345		if (ret) {
1346			task->tk_status = ret;
1347			return;
1348		}
1349		req->rq_bytes_sent = 0;
1350		spin_lock(&xprt->queue_lock);
1351		/*
1352		 * Requests that carry congestion control credits are added
1353		 * to the head of the list to avoid starvation issues.
1354		 */
1355		if (req->rq_cong) {
1356			xprt_clear_congestion_window_wait(xprt);
1357			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1358				if (pos->rq_cong)
1359					continue;
1360				/* Note: req is added _before_ pos */
1361				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1362				INIT_LIST_HEAD(&req->rq_xmit2);
1363				goto out;
1364			}
1365		} else if (!req->rq_seqno) {
1366			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1367				if (pos->rq_task->tk_owner != task->tk_owner)
1368					continue;
1369				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1370				INIT_LIST_HEAD(&req->rq_xmit);
1371				goto out;
1372			}
1373		}
1374		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1375		INIT_LIST_HEAD(&req->rq_xmit2);
1376out:
1377		atomic_long_inc(&xprt->xmit_queuelen);
1378		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1379		spin_unlock(&xprt->queue_lock);
1380	}
1381}
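
/*
 * Shape of the queue built above, as a sketch: with congestion-
 * credited requests A and B at the head, and C1/C2 sharing the same
 * tk_owner,
 *
 *	xmit_queue:  A -> B -> C1 -> D
 *	                        |
 *	                        C2	(chained via C1->rq_xmit2)
 *
 * so draining keeps credited requests first and lets one owner's
 * requests go out back to back.
 */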
1382
1383/**
1384 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1385 * @task: pointer to rpc_task
1386 *
1387 * Remove a task from the transmission queue
1388 * Caller must hold xprt->queue_lock
1389 */
1390static void
1391xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1392{
1393	struct rpc_rqst *req = task->tk_rqstp;
1394
1395	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1396		return;
1397	if (!list_empty(&req->rq_xmit)) {
1398		list_del(&req->rq_xmit);
1399		if (!list_empty(&req->rq_xmit2)) {
1400			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1401					struct rpc_rqst, rq_xmit2);
1402			list_del(&req->rq_xmit2);
1403			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1404		}
1405	} else
1406		list_del(&req->rq_xmit2);
1407	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
1408	xdr_free_bvec(&req->rq_snd_buf);
1409}
1410
1411/**
1412 * xprt_request_dequeue_transmit - remove a task from the transmission queue
1413 * @task: pointer to rpc_task
1414 *
1415 * Remove a task from the transmission queue
1416 */
1417static void
1418xprt_request_dequeue_transmit(struct rpc_task *task)
1419{
1420	struct rpc_rqst *req = task->tk_rqstp;
1421	struct rpc_xprt *xprt = req->rq_xprt;
1422
1423	spin_lock(&xprt->queue_lock);
1424	xprt_request_dequeue_transmit_locked(task);
1425	spin_unlock(&xprt->queue_lock);
1426}
1427
1428/**
1429 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1430 * @task: pointer to rpc_task
1431 *
1432 * Remove a task from the transmit and receive queues, and ensure that
1433 * it is not pinned by the receive work item.
1434 */
1435void
1436xprt_request_dequeue_xprt(struct rpc_task *task)
1437{
1438	struct rpc_rqst	*req = task->tk_rqstp;
1439	struct rpc_xprt *xprt = req->rq_xprt;
1440
1441	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1442	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1443	    xprt_is_pinned_rqst(req)) {
1444		spin_lock(&xprt->queue_lock);
1445		while (xprt_is_pinned_rqst(req)) {
1446			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1447			spin_unlock(&xprt->queue_lock);
1448			xprt_wait_on_pinned_rqst(req);
1449			spin_lock(&xprt->queue_lock);
1450			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1451		}
1452		xprt_request_dequeue_transmit_locked(task);
1453		xprt_request_dequeue_receive_locked(task);
1454		spin_unlock(&xprt->queue_lock);
1455		xdr_free_bvec(&req->rq_rcv_buf);
1456	}
1457}
1458
1459/**
1460 * xprt_request_prepare - prepare an encoded request for transport
1461 * @req: pointer to rpc_rqst
1462 * @buf: pointer to send/rcv xdr_buf
1463 *
1464 * Calls into the transport layer to do whatever is needed to prepare
1465 * the request for transmission or receive.
1466 * Returns error, or zero.
1467 */
1468static int
1469xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf)
1470{
1471	struct rpc_xprt *xprt = req->rq_xprt;
1472
1473	if (xprt->ops->prepare_request)
1474		return xprt->ops->prepare_request(req, buf);
1475	return 0;
1476}
1477
1478/**
1479 * xprt_request_need_retransmit - Test if a task needs retransmission
1480 * @task: pointer to rpc_task
1481 *
1482 * Test for whether a connection breakage requires the task to retransmit
1483 */
1484bool
1485xprt_request_need_retransmit(struct rpc_task *task)
1486{
1487	return xprt_request_retransmit_after_disconnect(task);
1488}
1489
1490/**
1491 * xprt_prepare_transmit - reserve the transport before sending a request
1492 * @task: RPC task about to send a request
1493 *
1494 */
1495bool xprt_prepare_transmit(struct rpc_task *task)
1496{
1497	struct rpc_rqst	*req = task->tk_rqstp;
1498	struct rpc_xprt	*xprt = req->rq_xprt;
1499
1500	if (!xprt_lock_write(xprt, task)) {
1501		/* Race breaker: someone may have transmitted us */
1502		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1503			rpc_wake_up_queued_task_set_status(&xprt->sending,
1504					task, 0);
1505		return false;
1506
1507	}
1508	if (atomic_read(&xprt->swapper))
1509		/* This will be clear in __rpc_execute */
1510		current->flags |= PF_MEMALLOC;
1511	return true;
1512}
1513
1514void xprt_end_transmit(struct rpc_task *task)
1515{
1516	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
1517
1518	xprt_inject_disconnect(xprt);
1519	xprt_release_write(xprt, task);
1520}
1521
1522/**
1523 * xprt_request_transmit - send an RPC request on a transport
1524 * @req: pointer to request to transmit
1525 * @snd_task: RPC task that owns the transport lock
1526 *
1527 * This performs the transmission of a single request.
1528 * Note that if the request is not the same as snd_task, then it
1529 * does need to be pinned.
1530 * Returns '0' on success.
1531 */
1532static int
1533xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1534{
1535	struct rpc_xprt *xprt = req->rq_xprt;
1536	struct rpc_task *task = req->rq_task;
1537	unsigned int connect_cookie;
1538	int is_retrans = RPC_WAS_SENT(task);
1539	int status;
1540
1541	if (!req->rq_bytes_sent) {
1542		if (xprt_request_data_received(task)) {
1543			status = 0;
1544			goto out_dequeue;
1545		}
1546		/* Verify that our message lies in the RPCSEC_GSS window */
1547		if (rpcauth_xmit_need_reencode(task)) {
1548			status = -EBADMSG;
1549			goto out_dequeue;
1550		}
1551		if (RPC_SIGNALLED(task)) {
1552			status = -ERESTARTSYS;
1553			goto out_dequeue;
1554		}
1555	}
1556
1557	/*
1558	 * Update req->rq_ntrans before transmitting to avoid races with
1559	 * xprt_update_rtt(), which needs to know that it is recording a
1560	 * reply to the first transmission.
1561	 */
1562	req->rq_ntrans++;
1563
1564	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1565	connect_cookie = xprt->connect_cookie;
1566	status = xprt->ops->send_request(req);
1567	if (status != 0) {
1568		req->rq_ntrans--;
1569		trace_xprt_transmit(req, status);
1570		return status;
1571	}
1572
1573	if (is_retrans) {
1574		task->tk_client->cl_stats->rpcretrans++;
1575		trace_xprt_retransmit(req);
1576	}
1577
1578	xprt_inject_disconnect(xprt);
1579
1580	task->tk_flags |= RPC_TASK_SENT;
1581	spin_lock(&xprt->transport_lock);
1582
1583	xprt->stat.sends++;
1584	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1585	xprt->stat.bklog_u += xprt->backlog.qlen;
1586	xprt->stat.sending_u += xprt->sending.qlen;
1587	xprt->stat.pending_u += xprt->pending.qlen;
1588	spin_unlock(&xprt->transport_lock);
1589
1590	req->rq_connect_cookie = connect_cookie;
1591out_dequeue:
1592	trace_xprt_transmit(req, status);
1593	xprt_request_dequeue_transmit(task);
1594	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1595	return status;
1596}
1597
1598/**
1599 * xprt_transmit - send an RPC request on a transport
1600 * @task: controlling RPC task
1601 *
1602 * Attempts to drain the transmit queue. On exit, either the transport
1603 * signalled an error that needs to be handled before transmission can
1604 * resume, or @task finished transmitting, and detected that it already
1605 * received a reply.
1606 */
1607void
1608xprt_transmit(struct rpc_task *task)
1609{
1610	struct rpc_rqst *next, *req = task->tk_rqstp;
1611	struct rpc_xprt	*xprt = req->rq_xprt;
1612	int status;
1613
1614	spin_lock(&xprt->queue_lock);
1615	for (;;) {
1616		next = list_first_entry_or_null(&xprt->xmit_queue,
1617						struct rpc_rqst, rq_xmit);
1618		if (!next)
1619			break;
1620		xprt_pin_rqst(next);
1621		spin_unlock(&xprt->queue_lock);
1622		status = xprt_request_transmit(next, task);
1623		if (status == -EBADMSG && next != req)
1624			status = 0;
1625		spin_lock(&xprt->queue_lock);
1626		xprt_unpin_rqst(next);
1627		if (status < 0) {
1628			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1629				task->tk_status = status;
1630			break;
1631		}
1632		/* Was @task transmitted, and has it received a reply? */
1633		if (xprt_request_data_received(task) &&
1634		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1635			break;
1636		cond_resched_lock(&xprt->queue_lock);
1637	}
1638	spin_unlock(&xprt->queue_lock);
1639}
1640
1641static void xprt_complete_request_init(struct rpc_task *task)
1642{
1643	if (task->tk_rqstp)
1644		xprt_request_init(task);
1645}
1646
1647void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1648{
1649	set_bit(XPRT_CONGESTED, &xprt->state);
1650	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1651}
1652EXPORT_SYMBOL_GPL(xprt_add_backlog);
1653
1654static bool __xprt_set_rq(struct rpc_task *task, void *data)
1655{
1656	struct rpc_rqst *req = data;
1657
1658	if (task->tk_rqstp == NULL) {
1659		memset(req, 0, sizeof(*req));	/* mark unused */
1660		task->tk_rqstp = req;
1661		return true;
1662	}
1663	return false;
1664}
1665
1666bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1667{
1668	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1669		clear_bit(XPRT_CONGESTED, &xprt->state);
1670		return false;
1671	}
1672	return true;
1673}
1674EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
1675
1676static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1677{
1678	bool ret = false;
1679
1680	if (!test_bit(XPRT_CONGESTED, &xprt->state))
1681		goto out;
1682	spin_lock(&xprt->reserve_lock);
1683	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1684		xprt_add_backlog(xprt, task);
1685		ret = true;
1686	}
1687	spin_unlock(&xprt->reserve_lock);
1688out:
1689	return ret;
1690}
1691
1692static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1693{
1694	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1695
1696	if (xprt->num_reqs >= xprt->max_reqs)
1697		goto out;
1698	++xprt->num_reqs;
1699	spin_unlock(&xprt->reserve_lock);
1700	req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
1701	spin_lock(&xprt->reserve_lock);
1702	if (req != NULL)
1703		goto out;
1704	--xprt->num_reqs;
1705	req = ERR_PTR(-ENOMEM);
1706out:
1707	return req;
1708}
1709
1710static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1711{
1712	if (xprt->num_reqs > xprt->min_reqs) {
1713		--xprt->num_reqs;
1714		kfree(req);
1715		return true;
1716	}
1717	return false;
1718}
1719
1720void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1721{
1722	struct rpc_rqst *req;
1723
1724	spin_lock(&xprt->reserve_lock);
1725	if (!list_empty(&xprt->free)) {
1726		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1727		list_del(&req->rq_list);
1728		goto out_init_req;
1729	}
1730	req = xprt_dynamic_alloc_slot(xprt);
1731	if (!IS_ERR(req))
1732		goto out_init_req;
1733	switch (PTR_ERR(req)) {
1734	case -ENOMEM:
1735		dprintk("RPC:       dynamic allocation of request slot "
1736				"failed! Retrying\n");
1737		task->tk_status = -ENOMEM;
1738		break;
1739	case -EAGAIN:
1740		xprt_add_backlog(xprt, task);
1741		dprintk("RPC:       waiting for request slot\n");
1742		fallthrough;
1743	default:
1744		task->tk_status = -EAGAIN;
1745	}
1746	spin_unlock(&xprt->reserve_lock);
1747	return;
1748out_init_req:
1749	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1750				     xprt->num_reqs);
1751	spin_unlock(&xprt->reserve_lock);
1752
1753	task->tk_status = 0;
1754	task->tk_rqstp = req;
1755}
1756EXPORT_SYMBOL_GPL(xprt_alloc_slot);
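/*
 * Example (sketch; example_ops is hypothetical): transports that need
 * no extra serialization can plug the generic helpers straight into
 * their ops table:
 *
 *	static const struct rpc_xprt_ops example_ops = {
 *		.alloc_slot	= xprt_alloc_slot,
 *		.free_slot	= xprt_free_slot,
 *		// ...
 *	};
 *
 * Transports that must serialize slot allocation against connection
 * state wrap xprt_alloc_slot() in a locking helper instead.
 */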
1757
1758void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1759{
1760	spin_lock(&xprt->reserve_lock);
1761	if (!xprt_wake_up_backlog(xprt, req) &&
1762	    !xprt_dynamic_free_slot(xprt, req)) {
1763		memset(req, 0, sizeof(*req));	/* mark unused */
1764		list_add(&req->rq_list, &xprt->free);
1765	}
1766	spin_unlock(&xprt->reserve_lock);
1767}
1768EXPORT_SYMBOL_GPL(xprt_free_slot);
1769
1770static void xprt_free_all_slots(struct rpc_xprt *xprt)
1771{
1772	struct rpc_rqst *req;
1773	while (!list_empty(&xprt->free)) {
1774		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1775		list_del(&req->rq_list);
1776		kfree(req);
1777	}
1778}
1779
1780static DEFINE_IDA(rpc_xprt_ids);
1781
1782void xprt_cleanup_ids(void)
1783{
1784	ida_destroy(&rpc_xprt_ids);
1785}
1786
1787static int xprt_alloc_id(struct rpc_xprt *xprt)
1788{
1789	int id;
1790
1791	id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL);
1792	if (id < 0)
1793		return id;
1794
1795	xprt->id = id;
1796	return 0;
1797}
1798
1799static void xprt_free_id(struct rpc_xprt *xprt)
1800{
1801	ida_free(&rpc_xprt_ids, xprt->id);
1802}
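/*
 * Editor's note: this is the stock IDA pattern.  A caller that cares
 * about exhaustion would check the result (xprt_alloc() below currently
 * does not, leaving xprt->id at 0 on failure):
 *
 *	int err = xprt_alloc_id(xprt);
 *	if (err < 0)
 *		return err;	// -ENOMEM or IDA space exhausted
 *	// ...
 *	xprt_free_id(xprt);	// on teardown, from xprt_free()
 */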
1803
1804struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1805		unsigned int num_prealloc,
1806		unsigned int max_alloc)
1807{
1808	struct rpc_xprt *xprt;
1809	struct rpc_rqst *req;
1810	int i;
1811
1812	xprt = kzalloc(size, GFP_KERNEL);
1813	if (xprt == NULL)
1814		goto out;
1815
1816	xprt_alloc_id(xprt);
1817	xprt_init(xprt, net);
1818
1819	for (i = 0; i < num_prealloc; i++) {
1820		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1821		if (!req)
1822			goto out_free;
1823		list_add(&req->rq_list, &xprt->free);
1824	}
1825	xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
1826	xprt->min_reqs = num_prealloc;
1827	xprt->num_reqs = num_prealloc;
1828
1829	return xprt;
1830
1831out_free:
1832	xprt_free(xprt);
1833out:
1834	return NULL;
1835}
1836EXPORT_SYMBOL_GPL(xprt_alloc);
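/*
 * Example (sketch; struct example_xprt is an assumption): a transport
 * typically embeds struct rpc_xprt in its own private structure and
 * lets xprt_alloc() size the whole allocation:
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;		// embedded generic state
 *		int		private_state;
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  RPC_DEF_SLOT_TABLE, RPC_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	ex = container_of(xprt, struct example_xprt, xprt);
 */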
1837
1838void xprt_free(struct rpc_xprt *xprt)
1839{
1840	put_net_track(xprt->xprt_net, &xprt->ns_tracker);
1841	xprt_free_all_slots(xprt);
1842	xprt_free_id(xprt);
1843	rpc_sysfs_xprt_destroy(xprt);
1844	kfree_rcu(xprt, rcu);
1845}
1846EXPORT_SYMBOL_GPL(xprt_free);
1847
1848static void
1849xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1850{
1851	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1852}
1853
1854static __be32
1855xprt_alloc_xid(struct rpc_xprt *xprt)
1856{
1857	__be32 xid;
1858
1859	spin_lock(&xprt->reserve_lock);
1860	xid = (__force __be32)xprt->xid++;
1861	spin_unlock(&xprt->reserve_lock);
1862	return xid;
1863}
1864
1865static void
1866xprt_init_xid(struct rpc_xprt *xprt)
1867{
1868	xprt->xid = get_random_u32();
1869}
1870
1871static void
1872xprt_request_init(struct rpc_task *task)
1873{
1874	struct rpc_xprt *xprt = task->tk_xprt;
1875	struct rpc_rqst	*req = task->tk_rqstp;
1876
1877	req->rq_task	= task;
1878	req->rq_xprt    = xprt;
1879	req->rq_buffer  = NULL;
1880	req->rq_xid	= xprt_alloc_xid(xprt);
1881	xprt_init_connect_cookie(req, xprt);
1882	req->rq_snd_buf.len = 0;
1883	req->rq_snd_buf.buflen = 0;
1884	req->rq_rcv_buf.len = 0;
1885	req->rq_rcv_buf.buflen = 0;
1886	req->rq_snd_buf.bvec = NULL;
1887	req->rq_rcv_buf.bvec = NULL;
1888	req->rq_release_snd_buf = NULL;
1889	xprt_init_majortimeo(task, req);
1890
1891	trace_xprt_reserve(req);
1892}
1893
1894static void
1895xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1896{
1897	xprt->ops->alloc_slot(xprt, task);
1898	if (task->tk_rqstp != NULL)
1899		xprt_request_init(task);
1900}
1901
1902/**
1903 * xprt_reserve - allocate an RPC request slot
1904 * @task: RPC task requesting a slot allocation
1905 *
1906 * If the transport is marked as being congested, or if no more
1907 * slots are available, place the task on the transport's
1908 * backlog queue.
1909 */
1910void xprt_reserve(struct rpc_task *task)
1911{
1912	struct rpc_xprt *xprt = task->tk_xprt;
1913
1914	task->tk_status = 0;
1915	if (task->tk_rqstp != NULL)
1916		return;
1917
1918	task->tk_status = -EAGAIN;
1919	if (!xprt_throttle_congested(xprt, task))
1920		xprt_do_reserve(xprt, task);
1921}
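/*
 * Example (sketch of a hypothetical caller): the client state machine
 * invokes this from its reserve step; -EAGAIN in tk_status means the
 * task is now asleep on the backlog queue and will retry when woken:
 *
 *	xprt_reserve(task);
 *	if (task->tk_status == -EAGAIN)
 *		return;			// sleeping on xprt->backlog
 *	if (task->tk_status < 0)
 *		goto error;		// e.g. -ENOMEM from slot alloc
 *	// task->tk_rqstp now points at an initialized rpc_rqst
 *
 * On the wakeup path the caller typically switches to
 * xprt_retry_reserve() below, which skips the congestion test.
 */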
1922
1923/**
1924 * xprt_retry_reserve - allocate an RPC request slot
1925 * @task: RPC task requesting a slot allocation
1926 *
1927 * If no more slots are available, place the task on the transport's
1928 * backlog queue.
1929 * Note that the only difference from xprt_reserve() is that this
1930 * function ignores the value of the XPRT_CONGESTED flag.
1931 */
1932void xprt_retry_reserve(struct rpc_task *task)
1933{
1934	struct rpc_xprt *xprt = task->tk_xprt;
1935
1936	task->tk_status = 0;
1937	if (task->tk_rqstp != NULL)
1938		return;
1939
1940	task->tk_status = -EAGAIN;
1941	xprt_do_reserve(xprt, task);
1942}
1943
1944/**
1945 * xprt_release - release an RPC request slot
1946 * @task: task which is finished with the slot
1947 *
1948 */
1949void xprt_release(struct rpc_task *task)
1950{
1951	struct rpc_xprt	*xprt;
1952	struct rpc_rqst	*req = task->tk_rqstp;
1953
1954	if (req == NULL) {
1955		if (task->tk_client) {
1956			xprt = task->tk_xprt;
1957			xprt_release_write(xprt, task);
1958		}
1959		return;
1960	}
1961
1962	xprt = req->rq_xprt;
1963	xprt_request_dequeue_xprt(task);
1964	spin_lock(&xprt->transport_lock);
1965	xprt->ops->release_xprt(xprt, task);
1966	if (xprt->ops->release_request)
1967		xprt->ops->release_request(task);
1968	xprt_schedule_autodisconnect(xprt);
1969	spin_unlock(&xprt->transport_lock);
1970	if (req->rq_buffer)
1971		xprt->ops->buf_free(task);
1972	if (req->rq_cred != NULL)
1973		put_rpccred(req->rq_cred);
1974	if (req->rq_release_snd_buf)
1975		req->rq_release_snd_buf(req);
1976
1977	task->tk_rqstp = NULL;
1978	if (likely(!bc_prealloc(req)))
1979		xprt->ops->free_slot(xprt, req);
1980	else
1981		xprt_free_bc_request(req);
1982}
1983
1984#ifdef CONFIG_SUNRPC_BACKCHANNEL
1985void
1986xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1987{
1988	struct xdr_buf *xbufp = &req->rq_snd_buf;
1989
1990	task->tk_rqstp = req;
1991	req->rq_task = task;
1992	xprt_init_connect_cookie(req, req->rq_xprt);
1993	/*
1994	 * Set up the xdr_buf length.
1995	 * This also indicates that the buffer is XDR encoded already.
1996	 */
1997	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1998		xbufp->tail[0].iov_len;
1999}
2000#endif
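/*
 * Worked example (illustrative sizes): for a pre-encoded backchannel
 * send buffer with a 124-byte head, 4096 bytes of page data and an
 * empty tail, xprt_init_bc_request() sets
 *
 *	xbufp->len = 124 + 4096 + 0 = 4220
 *
 * i.e. the total XDR length is simply the sum of the three segments.
 */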
2001
2002static void xprt_init(struct rpc_xprt *xprt, struct net *net)
2003{
2004	kref_init(&xprt->kref);
2005
2006	spin_lock_init(&xprt->transport_lock);
2007	spin_lock_init(&xprt->reserve_lock);
2008	spin_lock_init(&xprt->queue_lock);
2009
2010	INIT_LIST_HEAD(&xprt->free);
2011	xprt->recv_queue = RB_ROOT;
2012	INIT_LIST_HEAD(&xprt->xmit_queue);
2013#if defined(CONFIG_SUNRPC_BACKCHANNEL)
2014	spin_lock_init(&xprt->bc_pa_lock);
2015	INIT_LIST_HEAD(&xprt->bc_pa_list);
2016#endif /* CONFIG_SUNRPC_BACKCHANNEL */
2017	INIT_LIST_HEAD(&xprt->xprt_switch);
2018
2019	xprt->last_used = jiffies;
2020	xprt->cwnd = RPC_INITCWND;
2021	xprt->bind_index = 0;
2022
2023	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
2024	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
2025	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
2026	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
2027
2028	xprt_init_xid(xprt);
2029
2030	xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL);
2031}
2032
2033/**
2034 * xprt_create_transport - create an RPC transport
2035 * @args: rpc transport creation arguments
2036 *
2037 */
2038struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
2039{
2040	struct rpc_xprt	*xprt;
2041	const struct xprt_class *t;
2042
2043	t = xprt_class_find_by_ident(args->ident);
2044	if (!t) {
2045		dprintk("RPC: transport (%d) not supported\n", args->ident);
2046		return ERR_PTR(-EIO);
2047	}
2048
2049	xprt = t->setup(args);
2050	xprt_class_release(t);
2051
2052	if (IS_ERR(xprt))
2053		goto out;
2054	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
2055		xprt->idle_timeout = 0;
2056	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
2057	if (xprt_has_timer(xprt))
2058		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
2059	else
2060		timer_setup(&xprt->timer, NULL, 0);
2061
2062	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
2063		xprt_destroy(xprt);
2064		return ERR_PTR(-EINVAL);
2065	}
2066	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
2067	if (xprt->servername == NULL) {
2068		xprt_destroy(xprt);
2069		return ERR_PTR(-ENOMEM);
2070	}
2071
2072	rpc_xprt_debugfs_register(xprt);
2073
2074	trace_xprt_create(xprt);
2075out:
2076	return xprt;
2077}
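/*
 * Example (sketch; field values are assumptions): callers fill in a
 * struct xprt_create and must check for an ERR_PTR result:
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&sin,
 *		.addrlen	= sizeof(sin),
 *		.servername	= "server.example.com",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);	// -EIO if @ident is unknown
 */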
2078
2079static void xprt_destroy_cb(struct work_struct *work)
2080{
2081	struct rpc_xprt *xprt =
2082		container_of(work, struct rpc_xprt, task_cleanup);
2083
2084	trace_xprt_destroy(xprt);
2085
2086	rpc_xprt_debugfs_unregister(xprt);
2087	rpc_destroy_wait_queue(&xprt->binding);
2088	rpc_destroy_wait_queue(&xprt->pending);
2089	rpc_destroy_wait_queue(&xprt->sending);
2090	rpc_destroy_wait_queue(&xprt->backlog);
2091	kfree(xprt->servername);
2092	/*
2093	 * Destroy any existing back channel
2094	 */
2095	xprt_destroy_backchannel(xprt, UINT_MAX);
2096
2097	/*
2098	 * Tear down transport state and free the rpc_xprt
2099	 */
2100	xprt->ops->destroy(xprt);
2101}
2102
2103/**
2104 * xprt_destroy - destroy an RPC transport, killing off all requests.
2105 * @xprt: transport to destroy
2106 *
2107 */
2108static void xprt_destroy(struct rpc_xprt *xprt)
2109{
2110	/*
2111	 * Exclude transport connect/disconnect handlers and autoclose
2112	 */
2113	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2114
2115	/*
2116	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
2117	 * is cleared.  We use ->transport_lock to ensure the mod_timer()
2118	 * can only run *before* del_timer_sync(), never after.
2119	 */
2120	spin_lock(&xprt->transport_lock);
2121	del_timer_sync(&xprt->timer);
2122	spin_unlock(&xprt->transport_lock);
2123
2124	/*
2125	 * Destroy sockets etc from the system workqueue so they can
2126	 * safely flush receive work running on rpciod.
2127	 */
2128	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2129	schedule_work(&xprt->task_cleanup);
2130}
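/*
 * Editor's sketch of the teardown chain triggered by the final
 * reference drop:
 *
 *	xprt_put(xprt)			// last kref goes away
 *	  -> xprt_destroy_kref()
 *	       -> xprt_destroy()	// takes XPRT_LOCKED for good,
 *					// kills the timer under
 *					// transport_lock
 *	            -> schedule_work()	// defers to the system workqueue
 *	                 -> xprt_destroy_cb()
 *	                      -> xprt->ops->destroy(xprt)
 */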
2131
2132static void xprt_destroy_kref(struct kref *kref)
2133{
2134	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
2135}
2136
2137/**
2138 * xprt_get - return a reference to an RPC transport.
2139 * @xprt: pointer to the transport
2140 *
2141 */
2142struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2143{
2144	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2145		return xprt;
2146	return NULL;
2147}
2148EXPORT_SYMBOL_GPL(xprt_get);
2149
2150/**
2151 * xprt_put - release a reference to an RPC transport.
2152 * @xprt: pointer to the transport
2153 *
2154 */
2155void xprt_put(struct rpc_xprt *xprt)
2156{
2157	if (xprt != NULL)
2158		kref_put(&xprt->kref, xprt_destroy_kref);
2159}
2160EXPORT_SYMBOL_GPL(xprt_put);
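/*
 * Example (sketch; find_xprt() is hypothetical): xprt_get() tolerates
 * both NULL and a zero refcount, so lookups compose naturally:
 *
 *	struct rpc_xprt *xprt = xprt_get(find_xprt(...));
 *
 *	if (!xprt)
 *		return -ENOENT;	// absent, or raced with last xprt_put()
 *	use(xprt);
 *	xprt_put(xprt);		// may kick off xprt_destroy()
 */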
2161
2162void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
2163{
2164	if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
2165		spin_lock(&xps->xps_lock);
2166		xps->xps_nactive--;
2167		spin_unlock(&xps->xps_lock);
2168	}
2169}
2170
2171void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
2172{
2173	if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
2174		spin_lock(&xps->xps_lock);
2175		xps->xps_nactive++;
2176		spin_unlock(&xps->xps_lock);
2177	}
2178}
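/*
 * Editor's note: the test_and_set/clear pairs above make each
 * offline/online transition count exactly once, keeping xps_nactive
 * balanced even if a helper is called repeatedly:
 *
 *	xprt_set_offline_locked(xprt, xps);	// xps_nactive--
 *	xprt_set_offline_locked(xprt, xps);	// no-op, bit already set
 *	xprt_set_online_locked(xprt, xps);	// xps_nactive++
 */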
2179
2180void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
2181{
2182	if (test_and_set_bit(XPRT_REMOVE, &xprt->state))
2183		return;
2184
2185	xprt_force_disconnect(xprt);
2186	if (!test_bit(XPRT_CONNECTED, &xprt->state))
2187		return;
2188
2189	if (!xprt->sending.qlen && !xprt->pending.qlen &&
2190	    !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))
2191		rpc_xprt_switch_remove_xprt(xps, xprt, true);
2192}