/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Read chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max call header size = %u\n",
		__func__, size);
	return size;
}
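
/* Worked example (illustrative only, assuming the sizes defined for
 * this protocol version: a segment is four XDR words, a read chunk
 * adds a discriminator and a position word, and RPCRDMA_HDRLEN_MIN
 * is seven words): for a transport that supports 8 segments,
 *
 *	28 + (8 + 2) * 6 * 4 + (4 + 16 + 4) = 292 bytes
 *
 * is the largest Call header this function would report.
 */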

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
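
/* Example (numbers illustrative): if ri_max_inline_write is 1024, an
 * 800-byte call is sent inline, while a 64KB NFS WRITE fails the
 * length test and must move its payload into a Read chunk. The loop
 * above catches a second case: a payload that fits the byte budget
 * but spans more pages than the device provides Send SGEs for.
 */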

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
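
/* Illustration (assuming 4KB pages): a 9000-byte kvec whose base is
 * 1000 bytes into a page yields three SGEs of 3096, 4096, and 1808
 * bytes, and *n is bumped by three.
 */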

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		if (unlikely(!*ppages)) {
			/* XXX: Certain upper layer operations do
			 *	not provide receive buffer pages.
			 */
			*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -EAGAIN;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
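
/* On the wire, each read segment produced above occupies six XDR
 * words (values illustrative):
 *
 *	1 | position | handle | length | offset (2 words)
 *
 * where the final four words are the HLOO segment of the MR that
 * was registered just before encoding.
 */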

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}
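
/* Illustration: a Write chunk whose payload was registered as two
 * MRs is encoded as
 *
 *	1 | 2 | H1 L1 OO1 | H2 L2 OO2
 *
 * Note that nchunks above actually counts the HLOO segments (one per
 * registered MR) inside the single Write chunk; the discriminator
 * that terminates the Write list is encoded by the caller.
 */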

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}

/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = sc->sc_sges;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}
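
/* Illustration of the resulting SGE array for an inline Send with a
 * two-page payload (assuming 4KB pages):
 *
 *	sc_sges[0]: transport header (prepared by the caller)
 *	sc_sges[1]: head iovec (persistently mapped regbuf)
 *	sc_sges[2]: first payload page (DMA-mapped here)
 *	sc_sges[3]: second payload page (DMA-mapped here)
 *	sc_sges[4]: tail iovec, if any (DMA-mapped here)
 *
 * Only entries from index 2 onward count toward sc_unmap_count and
 * are unmapped by rpcrdma_unmap_sendctx() after the Send completes.
 */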

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshaled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -ENOBUFS;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
						RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}
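
	/* Example outcomes (illustrative): a small GETATTR is sent as
	 * RDMA_MSG with no chunks; a large NFS WRITE is sent as
	 * RDMA_MSG with its payload in a Read chunk; a large NFS READ
	 * reply is returned via a Write chunk; and a Call too large
	 * to send inline at all goes out as RDMA_NOMSG, the entire
	 * message in a Position Zero Read chunk.
	 */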

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_defer_recovery(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	r_xprt->rx_stats.failed_marshal_count++;
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
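
/* Worked example: for a 300-byte inline reply with head.iov_len 100
 * and page_len 150, the head is redirected to the first 100 bytes,
 * the next 150 bytes are memcopied into the page list, and the tail
 * is redirected to the remaining 50 bytes; the function returns 150,
 * the number of bytes that had to be copied.
 */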

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}
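
/* Illustration: a chunk of two segments with lengths 4096 and 1500
 * decodes to *length = 5596, the total number of bytes the server
 * claims to have written into this chunk.
 */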

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)));
		break;
	case err_chunk:
		dprintk("RPC: %5u: %s: server reports header decoding error\n",
			rqst->rq_task->tk_pid, __func__);
		break;
	default:
		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	unsigned long cwnd;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->recv_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->recv_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	status = -EIO;
	goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
						    &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	if (rep->rr_hdrbuf.head[0].iov_len == 0)
		goto out_badstatus;

	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);

	/* Fixed transport header fields */
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->recv_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	buf->rb_credits = credits;

	spin_unlock(&xprt->recv_lock);

	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto repost;

/* The RPC transaction has already been terminated, or the header
 * is corrupt.
 */
out_norqst:
	spin_unlock(&xprt->recv_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto repost;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}
v4.6
   1/*
 
   2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the BSD-type
   8 * license below:
   9 *
  10 * Redistribution and use in source and binary forms, with or without
  11 * modification, are permitted provided that the following conditions
  12 * are met:
  13 *
  14 *      Redistributions of source code must retain the above copyright
  15 *      notice, this list of conditions and the following disclaimer.
  16 *
  17 *      Redistributions in binary form must reproduce the above
  18 *      copyright notice, this list of conditions and the following
  19 *      disclaimer in the documentation and/or other materials provided
  20 *      with the distribution.
  21 *
  22 *      Neither the name of the Network Appliance, Inc. nor the names of
  23 *      its contributors may be used to endorse or promote products
  24 *      derived from this software without specific prior written
  25 *      permission.
  26 *
  27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38 */
  39
  40/*
  41 * rpc_rdma.c
  42 *
  43 * This file contains the guts of the RPC RDMA protocol, and
  44 * does marshaling/unmarshaling, etc. It is also where interfacing
  45 * to the Linux RPC framework lives.
  46 */
  47
  48#include "xprt_rdma.h"
  49
  50#include <linux/highmem.h>
  51
  52#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  53# define RPCDBG_FACILITY	RPCDBG_TRANS
  54#endif
  55
  56enum rpcrdma_chunktype {
  57	rpcrdma_noch = 0,
  58	rpcrdma_readch,
  59	rpcrdma_areadch,
  60	rpcrdma_writech,
  61	rpcrdma_replych
  62};
  63
  64#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  65static const char transfertypes[][12] = {
  66	"pure inline",	/* no chunks */
  67	" read chunk",	/* some argument via rdma read */
  68	"*read chunk",	/* entire request via rdma read */
  69	"write chunk",	/* some result via rdma write */
  70	"reply chunk"	/* entire reply via rdma write */
  71};
  72#endif
  73
  74/* The client can send a request inline as long as the RPCRDMA header
  75 * plus the RPC call fit under the transport's inline limit. If the
  76 * combined call message size exceeds that limit, the client must use
  77 * the read chunk list for this operation.
  78 */
  79static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
  80{
  81	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
  82
  83	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  84}
  85
  86/* The client can't know how large the actual reply will be. Thus it
  87 * plans for the largest possible reply for that particular ULP
  88 * operation. If the maximum combined reply message size exceeds that
  89 * limit, the client must provide a write list or a reply chunk for
  90 * this request.
  91 */
  92static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
  93{
  94	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
  95
  96	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
 
 
 
 
 
 
 
 
 
 
 
  97}
  98
  99static int
 100rpcrdma_tail_pullup(struct xdr_buf *buf)
 101{
 102	size_t tlen = buf->tail[0].iov_len;
 103	size_t skip = tlen & 3;
 
 
 
 
 
 
 
 104
 105	/* Do not include the tail if it is only an XDR pad */
 106	if (tlen < 4)
 107		return 0;
 
 
 
 
 
 
 
 
 
 
 108
 109	/* xdr_write_pages() adds a pad at the beginning of the tail
 110	 * if the content in "buf->pages" is unaligned. Force the
 111	 * tail's actual content to land at the next XDR position
 112	 * after the head instead.
 113	 */
 114	if (skip) {
 115		unsigned char *src, *dst;
 116		unsigned int count;
 117
 118		src = buf->tail[0].iov_base;
 119		dst = buf->head[0].iov_base;
 120		dst += buf->head[0].iov_len;
 
 
 
 
 
 
 
 
 
 121
 122		src += skip;
 123		tlen -= skip;
 124
 125		dprintk("RPC:       %s: skip=%zu, memmove(%p, %p, %zu)\n",
 126			__func__, skip, dst, src, tlen);
 
 
 
 
 
 
 
 
 127
 128		for (count = tlen; count; count--)
 129			*dst++ = *src++;
 130	}
 131
 132	return tlen;
 133}
 134
 135/* Split "vec" on page boundaries into segments. FMR registers pages,
 136 * not a byte range. Other modes coalesce these segments into a single
 137 * MR when they can.
 
 
 
 138 */
 139static int
 140rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
 141		     int n, int nsegs)
 142{
 143	size_t page_offset;
 144	u32 remaining;
 145	char *base;
 146
 147	base = vec->iov_base;
 148	page_offset = offset_in_page(base);
 149	remaining = vec->iov_len;
 150	while (remaining && n < nsegs) {
 151		seg[n].mr_page = NULL;
 152		seg[n].mr_offset = base;
 153		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
 154		remaining -= seg[n].mr_len;
 155		base += seg[n].mr_len;
 156		++n;
 
 157		page_offset = 0;
 158	}
 159	return n;
 160}
 161
 162/*
 163 * Chunk assembly from upper layer xdr_buf.
 
 164 *
 165 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 166 * elements. Segments are then coalesced when registered, if possible
 167 * within the selected memreg mode.
 168 *
 169 * Returns positive number of segments converted, or a negative errno.
 170 */
 171
 172static int
 173rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 174	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
 
 175{
 176	int len, n = 0, p;
 177	int page_base;
 178	struct page **ppages;
 179
 180	if (pos == 0) {
 181		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
 182		if (n == nsegs)
 183			return -EIO;
 184	}
 185
 186	len = xdrbuf->page_len;
 187	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
 188	page_base = xdrbuf->page_base & ~PAGE_MASK;
 189	p = 0;
 190	while (len && n < nsegs) {
 191		if (!ppages[p]) {
 192			/* alloc the pagelist for receiving buffer */
 193			ppages[p] = alloc_page(GFP_ATOMIC);
 194			if (!ppages[p])
 195				return -ENOMEM;
 
 196		}
 197		seg[n].mr_page = ppages[p];
 198		seg[n].mr_offset = (void *)(unsigned long) page_base;
 199		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
 200		if (seg[n].mr_len > PAGE_SIZE)
 201			return -EIO;
 202		len -= seg[n].mr_len;
 203		++n;
 204		++p;
 205		page_base = 0;	/* page offset only applies to first page */
 206	}
 207
 208	/* Message overflows the seg array */
 209	if (len && n == nsegs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 210		return -EIO;
 
 
 211
 212	/* When encoding the read list, the tail is always sent inline */
 213	if (type == rpcrdma_readch)
 214		return n;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 215
 216	if (xdrbuf->tail[0].iov_len) {
 217		/* the rpcrdma protocol allows us to omit any trailing
 218		 * xdr pad bytes, saving the server an RDMA operation. */
 219		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
 220			return n;
 221		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
 222		if (n == nsegs)
 223			return -EIO;
 224	}
 
 
 
 
 
 
 
 
 
 225
 226	return n;
 
 
 
 
 
 
 
 227}
 228
 229/*
 230 * Create read/write chunk lists, and reply chunks, for RDMA
 231 *
 232 *   Assume check against THRESHOLD has been done, and chunks are required.
 233 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 234 *     protocol is simple enough to allow this as it only has a single "bulk
 235 *     result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 236 *     RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 237 *
 238 * When used for a single reply chunk (which is a special write
 239 * chunk used for the entire reply, rather than just the data), it
 240 * is used primarily for READDIR and READLINK which would otherwise
 241 * be severely size-limited by a small rdma inline read max. The server
 242 * response will come back as an RDMA Write, followed by a message
 243 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 244 * chunks do not provide data alignment, however they do not require
 245 * "fixup" (moving the response to the upper layer buffer) either.
 246 *
 247 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 248 *
 249 *  Read chunklist (a linked list):
 250 *   N elements, position P (same P for all chunks of same arg!):
 251 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 252 *
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 253 *  Write chunklist (a list of (one) counted array):
 254 *   N elements:
 255 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 256 *
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 257 *  Reply chunk (a counted array):
 258 *   N elements:
 259 *    1 - N - HLOO - HLOO - ... - HLOO
 260 *
 261 * Returns positive RPC/RDMA header size, or negative errno.
 
 262 */
 263
 264static ssize_t
 265rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 266		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
 267{
 268	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 269	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 270	int n, nsegs, nchunks = 0;
 271	unsigned int pos;
 272	struct rpcrdma_mr_seg *seg = req->rl_segments;
 273	struct rpcrdma_read_chunk *cur_rchunk = NULL;
 274	struct rpcrdma_write_array *warray = NULL;
 275	struct rpcrdma_write_chunk *cur_wchunk = NULL;
 276	__be32 *iptr = headerp->rm_body.rm_chunks;
 277	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);
 278
 279	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
 280		/* a read chunk - server will RDMA Read our memory */
 281		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
 282	} else {
 283		/* a write or reply chunk - server will RDMA Write our memory */
 284		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
 285		if (type == rpcrdma_replych)
 286			*iptr++ = xdr_zero;	/* a NULL write chunk list */
 287		warray = (struct rpcrdma_write_array *) iptr;
 288		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
 289	}
 290
 291	if (type == rpcrdma_replych || type == rpcrdma_areadch)
 292		pos = 0;
 293	else
 294		pos = target->head[0].iov_len;
 295
 296	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
 297	if (nsegs < 0)
 298		return nsegs;
 299
 300	map = r_xprt->rx_ia.ri_ops->ro_map;
 
 
 
 
 
 
 
 301	do {
 302		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
 303		if (n <= 0)
 304			goto out;
 305		if (cur_rchunk) {	/* read */
 306			cur_rchunk->rc_discrim = xdr_one;
 307			/* all read chunks have the same "position" */
 308			cur_rchunk->rc_position = cpu_to_be32(pos);
 309			cur_rchunk->rc_target.rs_handle =
 310						cpu_to_be32(seg->mr_rkey);
 311			cur_rchunk->rc_target.rs_length =
 312						cpu_to_be32(seg->mr_len);
 313			xdr_encode_hyper(
 314					(__be32 *)&cur_rchunk->rc_target.rs_offset,
 315					seg->mr_base);
 316			dprintk("RPC:       %s: read chunk "
 317				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
 318				seg->mr_len, (unsigned long long)seg->mr_base,
 319				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
 320			cur_rchunk++;
 321			r_xprt->rx_stats.read_chunk_count++;
 322		} else {		/* write/reply */
 323			cur_wchunk->wc_target.rs_handle =
 324						cpu_to_be32(seg->mr_rkey);
 325			cur_wchunk->wc_target.rs_length =
 326						cpu_to_be32(seg->mr_len);
 327			xdr_encode_hyper(
 328					(__be32 *)&cur_wchunk->wc_target.rs_offset,
 329					seg->mr_base);
 330			dprintk("RPC:       %s: %s chunk "
 331				"elem %d@0x%llx:0x%x (%s)\n", __func__,
 332				(type == rpcrdma_replych) ? "reply" : "write",
 333				seg->mr_len, (unsigned long long)seg->mr_base,
 334				seg->mr_rkey, n < nsegs ? "more" : "last");
 335			cur_wchunk++;
 336			if (type == rpcrdma_replych)
 337				r_xprt->rx_stats.reply_chunk_count++;
 338			else
 339				r_xprt->rx_stats.write_chunk_count++;
 340			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 341		}
 342		nchunks++;
 343		seg   += n;
 344		nsegs -= n;
 345	} while (nsegs);
 346
 347	/* success. all failures return above */
 348	req->rl_nchunks = nchunks;
 349
 350	/*
 351	 * Finish off the header: for write/reply chunks, marshal the discriminator and nchunks.
 352	 */
 353	if (cur_rchunk) {
 354		iptr = (__be32 *) cur_rchunk;
 355		*iptr++ = xdr_zero;	/* finish the read chunk list */
 356		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
 357		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
 358	} else {
 359		warray->wc_discrim = xdr_one;
 360		warray->wc_nchunks = cpu_to_be32(nchunks);
 361		iptr = (__be32 *) cur_wchunk;
 362		if (type == rpcrdma_writech) {
 363			*iptr++ = xdr_zero; /* finish the write chunk list */
 364			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
 365		}
 366	}
 367
 368	/*
 369	 * Return header size.
 370	 */
 371	return (unsigned char *)iptr - (unsigned char *)headerp;
 372
 373out:
 374	for (pos = 0; nchunks--;)
 375		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
 376						      &req->rl_segments[pos]);
 377	return n;
 378}
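/* Illustrative sketch: how one Read list entry from the encoding key
 * above (1 - PHLOO) is laid out as 32-bit XDR words on the wire. The
 * function name is hypothetical and not part of this file; xdr_one,
 * cpu_to_be32() and xdr_encode_hyper() are the same helpers used in
 * rpcrdma_create_chunks() above.
 */
static __be32 *rpcrdma_encode_read_seg_example(__be32 *p, u32 position,
					       u32 handle, u32 length,
					       u64 offset)
{
	*p++ = xdr_one;			/* discriminator: one more entry */
	*p++ = cpu_to_be32(position);	/* P: XDR offset of this chunk's data */
	*p++ = cpu_to_be32(handle);	/* H: R_key of the registered MR */
	*p++ = cpu_to_be32(length);	/* L: segment length in bytes */
	p = xdr_encode_hyper(p, offset); /* OO: 64-bit segment address */
	return p;	/* caller terminates the list with an xdr_zero word */
}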
 379
 380/*
 381 * Copy write data inline.
 382 * This function is used for "small" requests. Data that is passed
 383 * to RPC via iovecs (or page list) is copied directly into the
 384 * pre-registered memory buffer for this request. For small amounts
 385 * of data, this is efficient. The cutoff value is tunable.
 386 */
 387static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
 388{
 389	int i, npages, curlen;
 390	int copy_len;
 391	unsigned char *srcp, *destp;
 392	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 393	int page_base;
 394	struct page **ppages;
 395
 396	destp = rqst->rq_svec[0].iov_base;
 397	curlen = rqst->rq_svec[0].iov_len;
 398	destp += curlen;
 399
 400	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
 401		__func__, destp, rqst->rq_slen, curlen);
 402
 403	copy_len = rqst->rq_snd_buf.page_len;
 404
 405	if (rqst->rq_snd_buf.tail[0].iov_len) {
 406		curlen = rqst->rq_snd_buf.tail[0].iov_len;
 407		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
 408			memmove(destp + copy_len,
 409				rqst->rq_snd_buf.tail[0].iov_base, curlen);
 410			r_xprt->rx_stats.pullup_copy_count += curlen;
 411		}
 412		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
 413			__func__, destp + copy_len, curlen);
 414		rqst->rq_svec[0].iov_len += curlen;
 415	}
 416	r_xprt->rx_stats.pullup_copy_count += copy_len;
 417
 418	page_base = rqst->rq_snd_buf.page_base;
 419	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
 420	page_base &= ~PAGE_MASK;
 421	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
 422	for (i = 0; copy_len && i < npages; i++) {
 423		curlen = PAGE_SIZE - page_base;
 424		if (curlen > copy_len)
 425			curlen = copy_len;
 426		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
 427			__func__, i, destp, copy_len, curlen);
 428		srcp = kmap_atomic(ppages[i]);
 429		memcpy(destp, srcp+page_base, curlen);
 430		kunmap_atomic(srcp);
 431		rqst->rq_svec[0].iov_len += curlen;
 432		destp += curlen;
 433		copy_len -= curlen;
 434		page_base = 0;
 435	}
 436	/* header now contains entire send message */
 437}
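/* Worked example (illustrative, assuming 4KB pages): with page_base = 100
 * and copy_len = 8000, npages = PAGE_ALIGN(100 + 8000) >> PAGE_SHIFT = 2.
 * The first pass through the loop above copies PAGE_SIZE - 100 = 3996
 * bytes; the second copies the remaining 4004, with page_base reset to 0.
 */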
 438
 439/*
 440 * Marshal a request: the primary job of this routine is to choose
 441 * the transfer modes. See comments below.
 442 *
 443 * Uses multiple RDMA IOVs for a request:
 444 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 445 *         preregistered buffer that already holds the RPC data in
 446 *         its middle.
 447 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 448 *  [2] -- optional padding.
 449 *  [3] -- if padded, header only in [1] and data here.
 450 *
 451 * Returns zero on success, otherwise a negative errno.
 452 */
 453
 454int
 455rpcrdma_marshal_req(struct rpc_rqst *rqst)
 456{
 457	struct rpc_xprt *xprt = rqst->rq_xprt;
 458	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 459	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 460	char *base;
 461	size_t rpclen;
 462	ssize_t hdrlen;
 463	enum rpcrdma_chunktype rtype, wtype;
 464	struct rpcrdma_msg *headerp;
 465
 466#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 467	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
 468		return rpcrdma_bc_marshal_reply(rqst);
 469#endif
 470
 471	/*
 472	 * rpclen gets the amount of data in the first buffer, which is
 473	 * the pre-registered buffer.
 474	 */
 475	base = rqst->rq_svec[0].iov_base;
 476	rpclen = rqst->rq_svec[0].iov_len;
 477
 478	headerp = rdmab_to_msg(req->rl_rdmabuf);
 479	/* don't byte-swap XID, it's already done in request */
 480	headerp->rm_xid = rqst->rq_xid;
 481	headerp->rm_vers = rpcrdma_version;
 482	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
 483	headerp->rm_type = rdma_msg;
 484
 485	/*
 486	 * Chunks needed for results?
 487	 *
 488	 * o Read ops return data as write chunk(s), header as inline.
 489	 * o If the expected result is under the inline threshold, all ops
 490	 *   return as inline.
 491	 * o Large non-read ops return as a single reply chunk.
 492	 */
 493	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
 494		wtype = rpcrdma_writech;
 495	else if (rpcrdma_results_inline(rqst))
 496		wtype = rpcrdma_noch;
 497	else
 498		wtype = rpcrdma_replych;
 499
 500	/*
 501	 * Chunks needed for arguments?
 502	 *
 503	 * o If the total request is under the inline threshold, all ops
 504	 *   are sent as inline.
 505	 * o Large write ops transmit data as read chunk(s), header as
 506	 *   inline.
 507	 * o Large non-write ops are sent with the entire message as a
 508	 *   single read chunk (protocol 0-position special case).
 509	 *
 510	 * This assumes that the upper layer does not present a request
 511	 * that both has a data payload, and whose non-data arguments
 512	 * by themselves are larger than the inline threshold.
 513	 */
 514	if (rpcrdma_args_inline(rqst)) {
 515		rtype = rpcrdma_noch;
 516	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
 517		rtype = rpcrdma_readch;
 518	} else {
 519		r_xprt->rx_stats.nomsg_call_count++;
 520		headerp->rm_type = rdma_nomsg;
 521		rtype = rpcrdma_areadch;
 522		rpclen = 0;
 523	}
 524
 525	/* The following simplification is not true forever */
 526	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
 527		wtype = rpcrdma_noch;
 528	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
 529		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
 530			__func__);
 531		return -EIO;
 532	}
 533
 534	hdrlen = RPCRDMA_HDRLEN_MIN;
 535
 536	/*
 537	 * Pull up any extra send data into the preregistered buffer.
 538	 * When padding is in use and applies to the transfer, insert
 539	 * it and change the message type.
 540	 */
 541	if (rtype == rpcrdma_noch) {
 542
 543		rpcrdma_inline_pullup(rqst);
 544
 545		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
 546		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
 547		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
 548		/* new length after pullup */
 549		rpclen = rqst->rq_svec[0].iov_len;
 550	} else if (rtype == rpcrdma_readch)
 551		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
 552	if (rtype != rpcrdma_noch) {
 553		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
 554					       headerp, rtype);
 555		wtype = rtype;	/* simplify dprintk */
 556
 557	} else if (wtype != rpcrdma_noch) {
 558		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
 559					       headerp, wtype);
 560	}
 561	if (hdrlen < 0)
 562		return hdrlen;
 563
 564	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
 565		" headerp 0x%p base 0x%p lkey 0x%x\n",
 566		__func__, transfertypes[wtype], hdrlen, rpclen,
 567		headerp, base, rdmab_lkey(req->rl_rdmabuf));
 568
 569	/*
 570	 * Initialize send iovs: normally only two, the RDMA chunk header
 571	 * and the single preregistered RPC header buffer. If padding is
 572	 * present, a preregistered (and zeroed) pad buffer sits between
 573	 * the RPC header and any write data. In all non-RDMA cases, any
 574	 * following data has been copied into the RPC header buffer.
 575	 */
 576	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
 577	req->rl_send_iov[0].length = hdrlen;
 578	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 579
 580	req->rl_niovs = 1;
 581	if (rtype == rpcrdma_areadch)
 582		return 0;
 583
 584	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
 585	req->rl_send_iov[1].length = rpclen;
 586	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 587
 588	req->rl_niovs = 2;
 589	return 0;
 590}
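/* Illustrative sketch restating the argument-chunk decision made in
 * rpcrdma_marshal_req() above. The helper name is hypothetical; it uses
 * only symbols already referenced in this file.
 */
static enum rpcrdma_chunktype
rpcrdma_choose_rtype_example(struct rpc_rqst *rqst)
{
	if (rpcrdma_args_inline(rqst))
		return rpcrdma_noch;	/* whole call fits inline */
	if (rqst->rq_snd_buf.flags & XDRBUF_WRITE)
		return rpcrdma_readch;	/* data payload via Read chunks */
	return rpcrdma_areadch;		/* entire call as one Read chunk,
					 * header sent as RDMA_NOMSG */
}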
 591
 592/*
 593 * Chase down a received write or reply chunklist to get length
 594 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 595 */
 596static int
 597rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
 598{
 599	unsigned int i, total_len;
 600	struct rpcrdma_write_chunk *cur_wchunk;
 601	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
 602
 603	i = be32_to_cpu(**iptrp);
 604	if (i > max)
 605		return -1;
 606	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
 607	total_len = 0;
 608	while (i--) {
 609		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
 610		ifdebug(FACILITY) {
 611			u64 off;
 612			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
 613			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
 614				__func__,
 615				be32_to_cpu(seg->rs_length),
 616				(unsigned long long)off,
 617				be32_to_cpu(seg->rs_handle));
 618		}
 619		total_len += be32_to_cpu(seg->rs_length);
 620		++cur_wchunk;
 621	}
 622	/* check and adjust for properly terminated write chunk */
 623	if (wrchunk) {
 624		__be32 *w = (__be32 *) cur_wchunk;
 625		if (*w++ != xdr_zero)
 626			return -1;
 627		cur_wchunk = (struct rpcrdma_write_chunk *) w;
 628	}
 629	if ((char *)cur_wchunk > base + rep->rr_len)
 630		return -1;
 631
 632	*iptrp = (__be32 *) cur_wchunk;
 633	return total_len;
 634}
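/* Worked example (illustrative): a write list carrying two segments of
 * 4096 and 1500 bytes arrives as the words 2 - HLOO - HLOO. The function
 * above returns total_len = 5596 and leaves *iptrp pointing at the first
 * word after the counted array (past the terminating xdr_zero when
 * wrchunk is set).
 */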
 635
 636/*
 637 * Scatter inline received data back into the provided iovs.
 638 */
 639static void
 640rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 641{
 642	int i, npages, curlen, olen;
 643	char *destp;
 644	struct page **ppages;
 645	int page_base;
 646
 647	curlen = rqst->rq_rcv_buf.head[0].iov_len;
 648	if (curlen > copy_len) {	/* write chunk header fixup */
 649		curlen = copy_len;
 650		rqst->rq_rcv_buf.head[0].iov_len = curlen;
 651	}
 652
 653	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
 654		__func__, srcp, copy_len, curlen);
 655
 656	/* Shift pointer for first receive segment only */
 657	rqst->rq_rcv_buf.head[0].iov_base = srcp;
 658	srcp += curlen;
 659	copy_len -= curlen;
 660
 661	olen = copy_len;
 662	i = 0;
 663	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
 664	page_base = rqst->rq_rcv_buf.page_base;
 665	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
 666	page_base &= ~PAGE_MASK;
 667
 668	if (copy_len && rqst->rq_rcv_buf.page_len) {
 669		npages = PAGE_ALIGN(page_base +
 670			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
 671		for (; i < npages; i++) {
 672			curlen = PAGE_SIZE - page_base;
 673			if (curlen > copy_len)
 674				curlen = copy_len;
 675			dprintk("RPC:       %s: page %d"
 676				" srcp 0x%p len %d curlen %d\n",
 677				__func__, i, srcp, copy_len, curlen);
 678			destp = kmap_atomic(ppages[i]);
 679			memcpy(destp + page_base, srcp, curlen);
 680			flush_dcache_page(ppages[i]);
 681			kunmap_atomic(destp);
 682			srcp += curlen;
 683			copy_len -= curlen;
 684			if (copy_len == 0)
 685				break;
 686			page_base = 0;
 687		}
 688	}
 689
 690	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
 691		curlen = copy_len;
 692		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
 693			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
 694		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
 695			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
 696		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
 697			__func__, srcp, copy_len, curlen);
 698		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
 699		copy_len -= curlen; ++i;
 700	} else
 701		rqst->rq_rcv_buf.tail[0].iov_len = 0;
 702
 703	if (pad) {
 704		/* implicit padding on terminal chunk */
 705		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
 706		while (pad--)
 707			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
 708	}
 709
 710	if (copy_len)
 711		dprintk("RPC:       %s: %d bytes in"
 712			" %d extra segments (%d lost)\n",
 713			__func__, olen, i, copy_len);
 714
 715	/* TBD avoid a warning from call_decode() */
 716	rqst->rq_private_buf = rqst->rq_rcv_buf;
 717}
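/* Illustrative sketch of the XDR round-up implied by the "pad" argument
 * handled at the end of rpcrdma_inline_fixup(). The helper name is
 * hypothetical. XDR aligns opaque data to 4 bytes, so a terminal chunk
 * of 4093 bytes implies 3 zero pad bytes in the tail.
 */
static unsigned int rpcrdma_xdr_pad_example(unsigned int len)
{
	return (4 - (len & 3)) & 3;	/* 4093 -> 3, 4096 -> 0 */
}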
 718
 719void
 720rpcrdma_connect_worker(struct work_struct *work)
 721{
 722	struct rpcrdma_ep *ep =
 723		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
 724	struct rpcrdma_xprt *r_xprt =
 725		container_of(ep, struct rpcrdma_xprt, rx_ep);
 726	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
 727
 728	spin_lock_bh(&xprt->transport_lock);
 729	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
 730		++xprt->connect_cookie;
 731	if (ep->rep_connected > 0) {
 732		if (!xprt_test_and_set_connected(xprt))
 733			xprt_wake_pending_tasks(xprt, 0);
 734	} else {
 735		if (xprt_test_and_clear_connected(xprt))
 736			xprt_wake_pending_tasks(xprt, -ENOTCONN);
 737	}
 738	spin_unlock_bh(&xprt->transport_lock);
 739}
 740
 741#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 742/* By convention, backchannel calls arrive via rdma_msg type
 743 * messages, and never populate the chunk lists. This makes
 744 * the RPC/RDMA header small and fixed in size, so it is
 745 * straightforward to check the RPC header's direction field.
 746 */
 747static bool
 748rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
 749{
 750	__be32 *p = (__be32 *)headerp;
 751
 752	if (headerp->rm_type != rdma_msg)
 753		return false;
 754	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
 755		return false;
 756	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
 757		return false;
 758	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
 759		return false;
 760
 761	/* sanity */
 762	if (p[7] != headerp->rm_xid)
 763		return false;
 764	/* call direction */
 765	if (p[8] != cpu_to_be32(RPC_CALL))
 766		return false;
 767
 768	return true;
 769}
 770#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
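/* Illustrative map of the word layout rpcrdma_is_bcall() relies on, with
 * p indexing the received buffer as an array of __be32:
 *
 *   p[0] rm_xid     p[1] rm_vers     p[2] rm_credit    p[3] rm_type
 *   p[4..6] the three empty chunk lists (each xdr_zero)
 *   p[7] RPC XID (must equal rm_xid)   p[8] RPC direction (RPC_CALL)
 */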
 771
 772/*
 773 * This function is called when an async event that changes the
 774 * connection state is posted to the connection. All it does at
 775 * this point is mark the connection up or down; the RPC timers
 776 * do the rest.
 777 */
 778void
 779rpcrdma_conn_func(struct rpcrdma_ep *ep)
 780{
 781	schedule_delayed_work(&ep->rep_connect_worker, 0);
 782}
 783
 784/* Process received RPC/RDMA messages.
 785 *
 786 * Errors must result in the RPC task either being awakened, or
 787 * allowed to time out, so the error is discovered at that time.
 788 */
 789void
 790rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 791{
 792	struct rpcrdma_msg *headerp;
 793	struct rpcrdma_req *req;
 794	struct rpc_rqst *rqst;
 795	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
 796	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
 797	__be32 *iptr;
 798	int rdmalen, status, rmerr;
 799	unsigned long cwnd;
 800
 801	dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);
 802
 803	if (rep->rr_len == RPCRDMA_BAD_LEN)
 804		goto out_badstatus;
 805	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
 806		goto out_shortreply;
 807
 808	headerp = rdmab_to_msg(rep->rr_rdmabuf);
 809#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 810	if (rpcrdma_is_bcall(headerp))
 811		goto out_bcall;
 812#endif
 813
 814	/* Match incoming rpcrdma_rep to an rpcrdma_req to
 815	 * get context for handling any incoming chunks.
 816	 */
 817	spin_lock_bh(&xprt->transport_lock);
 818	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
 819	if (!rqst)
 820		goto out_nomatch;
 821
 822	req = rpcr_to_rdmar(rqst);
 823	if (req->rl_reply)
 824		goto out_duplicate;
 825
 826	/* Sanity checking has passed. We are now committed
 827	 * to complete this transaction.
 828	 */
 829	list_del_init(&rqst->rq_list);
 830	spin_unlock_bh(&xprt->transport_lock);
 831	dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
 832		__func__, rep, req, be32_to_cpu(headerp->rm_xid));
 833
 834	/* from here on, the reply is no longer an orphan */
 835	req->rl_reply = rep;
 836	xprt->reestablish_timeout = 0;
 837
 838	if (headerp->rm_vers != rpcrdma_version)
 839		goto out_badversion;
 840
 841	/* check for expected message types */
 842	/* The order of some of these tests is important. */
 843	switch (headerp->rm_type) {
 844	case rdma_msg:
 845		/* never expect read chunks */
 846		/* never expect reply chunks (two ways to check) */
 847		/* never expect write chunks without having offered RDMA */
 848		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
 849		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
 850		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
 851		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
 852		     req->rl_nchunks == 0))
 853			goto badheader;
 854		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
 855			/* count any expected write chunks in read reply */
 856			/* start at write chunk array count */
 857			iptr = &headerp->rm_body.rm_chunks[2];
 858			rdmalen = rpcrdma_count_chunks(rep,
 859						req->rl_nchunks, 1, &iptr);
 860			/* check for validity, and no reply chunk after */
 861			if (rdmalen < 0 || *iptr++ != xdr_zero)
 862				goto badheader;
 863			rep->rr_len -=
 864			    ((unsigned char *)iptr - (unsigned char *)headerp);
 865			status = rep->rr_len + rdmalen;
 866			r_xprt->rx_stats.total_rdma_reply += rdmalen;
 867			/* special case - last chunk may omit padding */
 868			if (rdmalen &= 3) {
 869				rdmalen = 4 - rdmalen;
 870				status += rdmalen;
 871			}
 872		} else {
 873			/* else ordinary inline */
 874			rdmalen = 0;
 875			iptr = (__be32 *)((unsigned char *)headerp +
 876							RPCRDMA_HDRLEN_MIN);
 877			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
 878			status = rep->rr_len;
 879		}
 880		/* Fix up the rpc results for upper layer */
 881		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
 882		break;
 883
 884	case rdma_nomsg:
 885		/* never expect read or write chunks, always reply chunks */
 886		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
 887		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
 888		    headerp->rm_body.rm_chunks[2] != xdr_one ||
 889		    req->rl_nchunks == 0)
 890			goto badheader;
 891		iptr = (__be32 *)((unsigned char *)headerp +
 892							RPCRDMA_HDRLEN_MIN);
 893		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
 894		if (rdmalen < 0)
 895			goto badheader;
 896		r_xprt->rx_stats.total_rdma_reply += rdmalen;
 897		/* Reply chunk buffer already is the reply vector - no fixup. */
 898		status = rdmalen;
 899		break;
 900
 901	case rdma_error:
 902		goto out_rdmaerr;
 903
 904badheader:
 905	default:
 906		dprintk("%s: invalid rpcrdma reply header (type %d):"
 907				" chunks[012] == %d %d %d"
 908				" expected chunks <= %d\n",
 909				__func__, be32_to_cpu(headerp->rm_type),
 910				headerp->rm_body.rm_chunks[0],
 911				headerp->rm_body.rm_chunks[1],
 912				headerp->rm_body.rm_chunks[2],
 913				req->rl_nchunks);
 914		status = -EIO;
 915		r_xprt->rx_stats.bad_reply_count++;
 916		break;
 917	}
 918
 919out:
 920	/* Invalidate and flush the data payloads before waking the
 921	 * waiting application. This guarantees the memory region is
 922	 * properly fenced from the server before the application
 923	 * accesses the data. It also ensures proper send flow
 924	 * control: waking the next RPC waits until this RPC has
 925	 * relinquished all its Send Queue entries.
 926	 */
 927	if (req->rl_nchunks)
 928		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
 929
 930	spin_lock_bh(&xprt->transport_lock);
 931	cwnd = xprt->cwnd;
 932	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
 933	if (xprt->cwnd > cwnd)
 934		xprt_release_rqst_cong(rqst->rq_task);
 935
 936	xprt_complete_rqst(rqst->rq_task, status);
 937	spin_unlock_bh(&xprt->transport_lock);
 938	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
 939			__func__, xprt, rqst, status);
 940	return;
 941
 942out_badstatus:
 943	rpcrdma_recv_buffer_put(rep);
 944	if (r_xprt->rx_ep.rep_connected == 1) {
 945		r_xprt->rx_ep.rep_connected = -EIO;
 946		rpcrdma_conn_func(&r_xprt->rx_ep);
 947	}
 948	return;
 949
 950#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 951out_bcall:
 952	rpcrdma_bc_receive_call(r_xprt, rep);
 953	return;
 954#endif
 955
 956/* If the incoming reply terminated a pending RPC, the next
 957 * RPC call will post a replacement receive buffer as it is
 958 * being marshaled.
 959 */
 960out_badversion:
 961	dprintk("RPC:       %s: invalid version %d\n",
 962		__func__, be32_to_cpu(headerp->rm_vers));
 963	status = -EIO;
 964	r_xprt->rx_stats.bad_reply_count++;
 965	goto out;
 966
 967out_rdmaerr:
 968	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
 969	switch (rmerr) {
 970	case ERR_VERS:
 971		pr_err("%s: server reports header version error (%u-%u)\n",
 972		       __func__,
 973		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
 974		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
 975		break;
 976	case ERR_CHUNK:
 977		pr_err("%s: server reports header decoding error\n",
 978		       __func__);
 979		break;
 980	default:
 981		pr_err("%s: server reports unknown error %d\n",
 982		       __func__, rmerr);
 983	}
 984	status = -EREMOTEIO;
 985	r_xprt->rx_stats.bad_reply_count++;
 986	goto out;
 987
 988/* If no pending RPC transaction was matched, post a replacement
 989 * receive buffer before returning.
 990 */
 991out_shortreply:
 992	dprintk("RPC:       %s: short/invalid reply\n", __func__);
 993	goto repost;
 994
 995out_nomatch:
 996	spin_unlock_bh(&xprt->transport_lock);
 997	dprintk("RPC:       %s: no match for incoming xid 0x%08x len %d\n",
 998		__func__, be32_to_cpu(headerp->rm_xid),
 999		rep->rr_len);
1000	goto repost;
1001
1002out_duplicate:
1003	spin_unlock_bh(&xprt->transport_lock);
1004	dprintk("RPC:       %s: "
1005		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
1006		__func__, rep, req, be32_to_cpu(headerp->rm_xid));
1007
1008repost:
1009	r_xprt->rx_stats.bad_reply_count++;
1010	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
1011		rpcrdma_recv_buffer_put(rep);
1012}
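/* Illustrative example of the credit accounting in rpcrdma_reply_handler()
 * above: if the server grants rb_credits = 32, xprt->cwnd becomes
 * 32 << RPC_CWNDSHIFT, allowing up to 32 RPC requests to be outstanding
 * on this transport at once.
 */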