// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2020, Oracle and/or its affiliates.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	return size;
}
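
/* Illustration (editor's note, not in the upstream source): assuming the
 * protocol sizes defined in <linux/sunrpc/rpc_rdma.h> and xprt_rdma.h
 * (RPCRDMA_HDRLEN_MIN = 7 XDR words, rpcrdma_readchunk_maxsz = 6,
 * rpcrdma_segment_maxsz = 4), a transport with maxsegs = 8 computes:
 *
 *	7 * 4           =  28 bytes	fixed fields and discriminators
 *	8 * 6 * 4       = 192 bytes	full-size Read list
 *	(1 + 4 + 1) * 4 =  24 bytes	minimal Reply chunk
 *	total           = 244 bytes
 *
 * rpcrdma_set_max_header_sizes() below subtracts this worst case from
 * the inline threshold to arrive at ep->re_max_inline_send.
 */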

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	return size;
}

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @ep: endpoint to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep)
{
	unsigned int maxsegs = ep->re_max_rdma_segs;

	ep->re_max_inline_send =
		ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->re_max_inline_recv =
		ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int count, remaining, offset;

	if (xdr->len > ep->re_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > ep->re_attr.cap.max_send_sge)
				return false;
		}
	}

	return true;
}
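
/* Illustration (editor's note, not in the upstream source): with
 * RPCRDMA_MIN_SEND_SGES = 3, an 8192-byte page list whose page_base
 * offset is 256 spans three page fragments (3840 + 4096 + 256 bytes),
 * so the Send WR needs 3 + 3 = 6 SGEs. A device whose max_send_sge is
 * less than 6 forces this request to use a Read chunk instead.
 */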

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep->re_max_inline_recv;
}

/* ACL likes to be lazy in allocating pages. For TCP, these
 * pages can be allocated during receive processing. Not true
 * for RDMA, which must always provision receive buffers
 * up front.
 */
static noinline int
rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
{
	struct page **ppages;
	int len;

	len = buf->page_len;
	ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
	while (len > 0) {
		if (!*ppages)
			*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
		if (!*ppages)
			return -ENOBUFS;
		ppages++;
		len -= PAGE_SIZE;
	}

	return 0;
}
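
/* Editor's note (not in the upstream source): GFP_NOWAIT keeps this
 * allocation from sleeping in the send path. On failure the -ENOBUFS
 * propagates out of rpcrdma_marshal_req(), whose return-value contract
 * (see below) tells the caller to try again after a delay.
 */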

/* Convert @vec to a single SGL element.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	seg->mr_page = virt_to_page(vec->iov_base);
	seg->mr_offset = offset_in_page(vec->iov_base);
	seg->mr_len = vec->iov_len;
	++seg;
	++(*n);
	return seg;
}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		seg->mr_page = *ppages;
		seg->mr_offset = page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	if (type == rpcrdma_readch)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length,
				mr->mr_offset);
	return 0;
}
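
/* Editor's note (not in the upstream source): on the wire, the two
 * encoders above emit (one box per XDR word):
 *
 *	plain segment: | handle | length | offset-hi | offset-lo |
 *	read segment:  | 1 | position | handle | length | offset-hi | offset-lo |
 *
 * which is why they reserve 4 and 6 words respectively.
 */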

static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs_err(r_xprt, req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	rpcrdma_mrs_refresh(r_xprt);
	return ERR_PTR(-EAGAIN);
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}
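
/* Editor's note (not in the upstream source): rpcrdma_encode_write_list()
 * reserves the segment count before the loop and backfills it afterwards
 * because the number of MRs actually consumed is not known until
 * frwr_map() has chunked the segment array.
 */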

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		if (xdr_stream_encode_item_absent(xdr) < 0)
			return -EMSGSIZE;
		return 0;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
}

/* The head iovec is straightforward, as it is usually already
 * DMA-mapped. Sync the content that has changed.
 */
static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		return false;

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	return true;
}

/* If there is a page list present, DMA map and prepare an
 * SGE for each page to be sent.
 */
static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
				     struct xdr_buf *xdr)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	unsigned int page_base, len, remaining;
	struct page **ppages;
	struct ib_sge *sge;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		sge = &sc->sc_sges[req->rl_wr.num_sge++];
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
					    page_base, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
			goto out_mapping_err;

		sge->length = len;
		sge->lkey = rdmab_lkey(rb);

		sc->sc_unmap_count++;
		ppages++;
		remaining -= len;
		page_base = 0;
	}

	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* The tail iovec may include an XDR pad for the page list,
 * as well as additional content, and may not reside in the
 * same page as the head iovec.
 */
static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     unsigned int page_base, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct page *page = virt_to_page(xdr->tail[0].iov_base);

	sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
		goto out_mapping_err;

	sge->length = len;
	sge->lkey = rdmab_lkey(rb);
	++sc->sc_unmap_count;
	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* Copy the tail to the end of the head buffer.
 */
static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned char *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len + xdr->page_len;
	memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
	r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
}

/* Copy pagelist content into the head buffer.
 */
static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned int len, page_base, remaining;
	struct page **ppages;
	unsigned char *src, *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len;
	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		src = page_address(*ppages);
		src += page_base;
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		memcpy(dst, src, len);
		r_xprt->rx_stats.pullup_copy_count += len;

		ppages++;
		dst += len;
		remaining -= len;
		page_base = 0;
	}
}

/* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
 * When the head, pagelist, and tail are small, a pull-up copy
 * is considerably less costly than DMA mapping the components
 * of @xdr.
 *
 * Assumptions:
 *  - the caller has already verified that the total length
 *    of the RPC Call body will fit into @rl_sendbuf.
 */
static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	if (unlikely(xdr->tail[0].iov_len))
		rpcrdma_pullup_tail_iov(r_xprt, req, xdr);

	if (unlikely(xdr->page_len))
		rpcrdma_pullup_pagelist(r_xprt, req, xdr);

	/* The whole RPC message resides in the head iovec now */
	return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
}

static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	struct kvec *tail = &xdr->tail[0];

	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;
	if (xdr->page_len)
		if (!rpcrdma_prepare_pagelist(req, xdr))
			return false;
	if (tail->iov_len)
		if (!rpcrdma_prepare_tail_iov(req, xdr,
					      offset_in_page(tail->iov_base),
					      tail->iov_len))
			return false;

	if (req->rl_sendctx->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;
}

static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
				   struct rpcrdma_req *req,
				   struct xdr_buf *xdr)
{
	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here.
	 */

	/* Do not include the tail if it is only an XDR pad */
	if (xdr->tail[0].iov_len > 3) {
		unsigned int page_base, len;

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() adds a pad at the beginning of
		 * the tail iovec. Force the tail's non-pad content to
		 * land at the next XDR position in the Send message.
		 */
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;
		page_base += len & 3;
		len -= len & 3;
		if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
			return false;
		kref_get(&req->rl_kref);
	}

	return true;
}
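
/* Illustration (editor's note, not in the upstream source): a 7-byte
 * tail (3 bytes of XDR pad followed by 4 bytes of trailing arguments)
 * gives len & 3 == 3, so rpcrdma_prepare_readch() skips the pad and
 * maps only the final 4 bytes, which then land XDR-aligned in the
 * Send message.
 */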

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, u32 hdrlen,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto out_nosc;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);
	req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
	req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
	req->rl_wr.num_sge = 0;
	req->rl_wr.opcode = IB_WR_SEND;

	rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);

	ret = -EIO;
	switch (rtype) {
	case rpcrdma_noch_pullup:
		if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_noch_mapped:
		if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_readch:
		if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_areadch:
		break;
	default:
		goto out_unmap;
	}

	return 0;

out_unmap:
	rpcrdma_sendctx_unmap(req->rl_sendctx);
out_nosc:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	struct xdr_buf *buf = &rqst->rq_snd_buf;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
		ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
		if (ret)
			return ret;
	}

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = r_xprt->rx_buf.rb_max_requests;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
				&rqst->rq_cred->cr_auth->au_flags);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
			rpcrdma_noch_pullup : rpcrdma_noch_mapped;
	} else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}
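
/* Illustration (editor's note, not in the upstream source): a small
 * NFS GETATTR fits inline in both directions, so rpcrdma_marshal_req()
 * emits rdma_msg with rtype = rpcrdma_noch_pullup and wtype =
 * rpcrdma_noch. A large NFS WRITE moves its payload by Read chunk
 * (rtype = rpcrdma_readch), a large READ returns data by Write chunk
 * (wtype = rpcrdma_writech), and a large READDIR reply arrives whole
 * in a Reply chunk (wtype = rpcrdma_replych).
 */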

static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
					 struct rpcrdma_buffer *buf,
					 u32 grant)
{
	buf->rb_credits = grant;
	xprt->cwnd = grant << RPC_CWNDSHIFT;
}

static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
	spin_unlock(&xprt->transport_lock);
}

/**
 * rpcrdma_reset_cwnd - Reset the xprt's congestion window
 * @r_xprt: controlling transport instance
 *
 * Prepare @r_xprt for the next connection by reinitializing
 * its credit grant to one (see RFC 8166, Section 3.3.3).
 */
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	xprt->cong = 0;
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
	spin_unlock(&xprt->transport_lock);
}
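
/* Illustration (editor's note, not in the upstream source): a reply
 * granting credits = 64 lets the client keep 64 RPCs in flight;
 * rpcrdma_update_cwnd() turns that into xprt->cwnd = 64 << RPC_CWNDSHIFT.
 */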

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	if (fixup_copy_count)
		trace_xprtrdma_fixup(rqst, fixup_copy_count);
	return fixup_copy_count;
}
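
/* Illustration (editor's note, not in the upstream source): for a reply
 * received entirely inline with a 120-byte head and 4096 bytes destined
 * for the page list, the head iovec is redirected into the receive
 * buffer (no copy), the 4096 page-list bytes are memcopied, and the
 * function returns 4096.
 */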

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (xdr_item_is_present(p++))
		return false;
	if (xdr_item_is_present(p++))
		return false;
	if (xdr_item_is_present(p++))
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		return true;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	xdr_decode_rdma_segment(p, &handle, length, &offset);
	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}
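
/* Illustration (editor's note, not in the upstream source): a Write
 * chunk received as | 2 | H L OO | H L OO | decodes with segcount = 2,
 * and *length ends up as the sum of the two segment lengths.
 */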

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(xdr_item_is_present(p)))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (xdr_item_is_absent(p))
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (xdr_item_is_present(p))
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		trace_xprtrdma_err_vers(rqst, p, p + 1);
		break;
	case err_chunk:
		trace_xprtrdma_err_chunk(rqst);
		break;
	default:
		trace_xprtrdma_err_unrecognized(rqst, p);
	}

	return -EIO;
}

/**
 * rpcrdma_unpin_rqst - Release rqst without completing it
 * @rep: RPC/RDMA Receive context
 *
 * This is done when a connection is lost so that a Reply
 * can be dropped and its matching Call can be subsequently
 * retransmitted on a new connection.
 */
void rpcrdma_unpin_rqst(struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &rep->rr_rxprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	req->rl_reply = NULL;
	rep->rr_rqst = NULL;

	spin_lock(&xprt->queue_lock);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
}

/**
 * rpcrdma_complete_rqst - Pass completed rqst back to RPC
 * @rep: RPC/RDMA Receive context
 *
 * Reconstruct the RPC reply and complete the transaction
 * while @rqst is still pinned to ensure the rep, rqst, and
 * rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

out_badheader:
	trace_xprtrdma_reply_hdr_err(rep);
	r_xprt->rx_stats.bad_reply_count++;
	rqst->rq_task->tk_status = status;
	status = 0;
	goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}

/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Any data means we had a useful conversation, so
	 * then we don't need to delay the next reconnect.
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_ep->re_max_requests)
		credits = r_xprt->rx_ep->re_max_requests;
	rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
			   false);
	if (buf->rb_credits != credits)
		rpcrdma_update_cwnd(r_xprt, credits);

	req = rpcr_to_rdmar(rqst);
	if (unlikely(req->rl_reply))
		rpcrdma_rep_put(buf, req->rl_reply);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers_err(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst_err(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short_err(rep);

out:
	rpcrdma_rep_put(buf, rep);
}
v3.1
 
  1/*
 
  2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the BSD-type
  8 * license below:
  9 *
 10 * Redistribution and use in source and binary forms, with or without
 11 * modification, are permitted provided that the following conditions
 12 * are met:
 13 *
 14 *      Redistributions of source code must retain the above copyright
 15 *      notice, this list of conditions and the following disclaimer.
 16 *
 17 *      Redistributions in binary form must reproduce the above
 18 *      copyright notice, this list of conditions and the following
 19 *      disclaimer in the documentation and/or other materials provided
 20 *      with the distribution.
 21 *
 22 *      Neither the name of the Network Appliance, Inc. nor the names of
 23 *      its contributors may be used to endorse or promote products
 24 *      derived from this software without specific prior written
 25 *      permission.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 38 */
 39
 40/*
 41 * rpc_rdma.c
 42 *
 43 * This file contains the guts of the RPC RDMA protocol, and
 44 * does marshaling/unmarshaling, etc. It is also where interfacing
 45 * to the Linux RPC framework lives.
 46 */
 47
 
 
 
 
 48#include "xprt_rdma.h"
 
 49
 50#include <linux/highmem.h>
 51
 52#ifdef RPC_DEBUG
 53# define RPCDBG_FACILITY	RPCDBG_TRANS
 54#endif
 55
 56enum rpcrdma_chunktype {
 57	rpcrdma_noch = 0,
 58	rpcrdma_readch,
 59	rpcrdma_areadch,
 60	rpcrdma_writech,
 61	rpcrdma_replych
 62};
 63
 64#ifdef RPC_DEBUG
 65static const char transfertypes[][12] = {
 66	"pure inline",	/* no chunks */
 67	" read chunk",	/* some argument via rdma read */
 68	"*read chunk",	/* entire request via rdma read */
 69	"write chunk",	/* some result via rdma write */
 70	"reply chunk"	/* entire reply via rdma write */
 71};
 72#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 73
 74/*
 75 * Chunk assembly from upper layer xdr_buf.
 76 *
 77 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 78 * elements. Segments are then coalesced when registered, if possible
 79 * within the selected memreg mode.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 80 *
 81 * Note, this routine is never called if the connection's memory
 82 * registration strategy is 0 (bounce buffers).
 83 */
 84
 85static int
 86rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 87	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
 
 88{
 89	int len, n = 0, p;
 90	int page_base;
 91	struct page **ppages;
 92
 93	if (pos == 0 && xdrbuf->head[0].iov_len) {
 94		seg[n].mr_page = NULL;
 95		seg[n].mr_offset = xdrbuf->head[0].iov_base;
 96		seg[n].mr_len = xdrbuf->head[0].iov_len;
 97		++n;
 98	}
 99
100	len = xdrbuf->page_len;
101	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
102	page_base = xdrbuf->page_base & ~PAGE_MASK;
103	p = 0;
104	while (len && n < nsegs) {
105		seg[n].mr_page = ppages[p];
106		seg[n].mr_offset = (void *)(unsigned long) page_base;
107		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
108		BUG_ON(seg[n].mr_len > PAGE_SIZE);
109		len -= seg[n].mr_len;
110		++n;
111		++p;
112		page_base = 0;	/* page offset only applies to first page */
113	}
114
115	/* Message overflows the seg array */
116	if (len && n == nsegs)
117		return 0;
 
 
 
 
 
 
 
118
119	if (xdrbuf->tail[0].iov_len) {
120		/* the rpcrdma protocol allows us to omit any trailing
121		 * xdr pad bytes, saving the server an RDMA operation. */
122		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
123			return n;
124		if (n == nsegs)
125			/* Tail remains, but we're out of segments */
126			return 0;
127		seg[n].mr_page = NULL;
128		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
129		seg[n].mr_len = xdrbuf->tail[0].iov_len;
130		++n;
131	}
132
 
 
 
133	return n;
134}
135
136/*
137 * Create read/write chunk lists, and reply chunks, for RDMA
138 *
139 *   Assume check against THRESHOLD has been done, and chunks are required.
140 *   Assume only encoding one list entry for read|write chunks. The NFSv3
141 *     protocol is simple enough to allow this as it only has a single "bulk
142 *     result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
143 *     RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
144 *
145 * When used for a single reply chunk (which is a special write
146 * chunk used for the entire reply, rather than just the data), it
147 * is used primarily for READDIR and READLINK which would otherwise
148 * be severely size-limited by a small rdma inline read max. The server
149 * response will come back as an RDMA Write, followed by a message
150 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
151 * chunks do not provide data alignment, however they do not require
152 * "fixup" (moving the response to the upper layer buffer) either.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153 *
154 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
155 *
156 *  Read chunklist (a linked list):
157 *   N elements, position P (same P for all chunks of same arg!):
158 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
159 *
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160 *  Write chunklist (a list of (one) counted array):
161 *   N elements:
162 *    1 - N - HLOO - HLOO - ... - HLOO - 0
163 *
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164 *  Reply chunk (a counted array):
165 *   N elements:
166 *    1 - N - HLOO - HLOO - ... - HLOO
 
 
 
167 */
168
169static unsigned int
170rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
171		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
172{
173	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
174	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
175	int nsegs, nchunks = 0;
176	unsigned int pos;
177	struct rpcrdma_mr_seg *seg = req->rl_segments;
178	struct rpcrdma_read_chunk *cur_rchunk = NULL;
179	struct rpcrdma_write_array *warray = NULL;
180	struct rpcrdma_write_chunk *cur_wchunk = NULL;
181	__be32 *iptr = headerp->rm_body.rm_chunks;
182
183	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
184		/* a read chunk - server will RDMA Read our memory */
185		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
186	} else {
187		/* a write or reply chunk - server will RDMA Write our memory */
188		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
189		if (type == rpcrdma_replych)
190			*iptr++ = xdr_zero;	/* a NULL write chunk list */
191		warray = (struct rpcrdma_write_array *) iptr;
192		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
193	}
194
195	if (type == rpcrdma_replych || type == rpcrdma_areadch)
196		pos = 0;
197	else
198		pos = target->head[0].iov_len;
199
200	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
201	if (nsegs == 0)
202		return 0;
 
 
 
203
 
204	do {
205		/* bind/register the memory, then build chunk from result. */
206		int n = rpcrdma_register_external(seg, nsegs,
207						cur_wchunk != NULL, r_xprt);
208		if (n <= 0)
209			goto out;
210		if (cur_rchunk) {	/* read */
211			cur_rchunk->rc_discrim = xdr_one;
212			/* all read chunks have the same "position" */
213			cur_rchunk->rc_position = htonl(pos);
214			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
215			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
216			xdr_encode_hyper(
217					(__be32 *)&cur_rchunk->rc_target.rs_offset,
218					seg->mr_base);
219			dprintk("RPC:       %s: read chunk "
220				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
221				seg->mr_len, (unsigned long long)seg->mr_base,
222				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
223			cur_rchunk++;
224			r_xprt->rx_stats.read_chunk_count++;
225		} else {		/* write/reply */
226			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
227			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
228			xdr_encode_hyper(
229					(__be32 *)&cur_wchunk->wc_target.rs_offset,
230					seg->mr_base);
231			dprintk("RPC:       %s: %s chunk "
232				"elem %d@0x%llx:0x%x (%s)\n", __func__,
233				(type == rpcrdma_replych) ? "reply" : "write",
234				seg->mr_len, (unsigned long long)seg->mr_base,
235				seg->mr_rkey, n < nsegs ? "more" : "last");
236			cur_wchunk++;
237			if (type == rpcrdma_replych)
238				r_xprt->rx_stats.reply_chunk_count++;
239			else
240				r_xprt->rx_stats.write_chunk_count++;
241			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
242		}
243		nchunks++;
244		seg   += n;
245		nsegs -= n;
246	} while (nsegs);
247
248	/* success. all failures return above */
249	req->rl_nchunks = nchunks;
250
251	BUG_ON(nchunks == 0);
252	BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
253	       && (nchunks > 3));
254
255	/*
256	 * finish off the header. If a write, marshal the discriminator and nchunks.
257	 */
258	if (cur_rchunk) {
259		iptr = (__be32 *) cur_rchunk;
260		*iptr++ = xdr_zero;	/* finish the read chunk list */
261		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
262		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
263	} else {
264		warray->wc_discrim = xdr_one;
265		warray->wc_nchunks = htonl(nchunks);
266		iptr = (__be32 *) cur_wchunk;
267		if (type == rpcrdma_writech) {
268			*iptr++ = xdr_zero; /* finish the write chunk list */
269			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
270		}
271	}
272
273	/*
274	 * Return header size.
275	 */
276	return (unsigned char *)iptr - (unsigned char *)headerp;
277
278out:
279	for (pos = 0; nchunks--;)
280		pos += rpcrdma_deregister_external(
281				&req->rl_segments[pos], r_xprt, NULL);
282	return 0;
283}
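
/*
 * Worked size example for the return value above (4-byte XDR words,
 * hypothetical single read chunk): 4 fixed header words (xid, vers,
 * credit, type) + 6 chunk words (discriminator, position, HLOO) +
 * 3 terminating zero words = 13 words, so the function returns 52.
 */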
284
285/*
286 * Copy write data inline.
287 * This function is used for "small" requests. Data that is passed
288 * to RPC via iovecs (or a page list) is copied directly into the
289 * pre-registered memory buffer for this request. For small amounts
290 * of data, this is efficient. The cutoff value is tunable.
291 */
292static int
293rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
294{
295	int i, npages, curlen;
296	int copy_len;
297	unsigned char *srcp, *destp;
298	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
299	int page_base;
300	struct page **ppages;
301
302	destp = rqst->rq_svec[0].iov_base;
303	curlen = rqst->rq_svec[0].iov_len;
304	destp += curlen;
305	/*
306	 * Do optional padding where it makes sense. Alignment of write
307	 * payload can help the server, if our padding setting is accurate.
308	 */
309	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
310	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
311		pad = 0;	/* don't pad this request */
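
	/*
	 * The constant 36 above is presumably the padded header size:
	 * 4 fixed words (xid, vers, credit, type) + rm_align + rm_thresh
	 * + 3 empty chunk-list words (rm_pempty) = 9 XDR words * 4 bytes.
	 */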
312
313	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
314		__func__, pad, destp, rqst->rq_slen, curlen);
315
316	copy_len = rqst->rq_snd_buf.page_len;
317
318	if (rqst->rq_snd_buf.tail[0].iov_len) {
319		curlen = rqst->rq_snd_buf.tail[0].iov_len;
320		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
321			memmove(destp + copy_len,
322				rqst->rq_snd_buf.tail[0].iov_base, curlen);
323			r_xprt->rx_stats.pullup_copy_count += curlen;
324		}
325		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
326			__func__, destp + copy_len, curlen);
327		rqst->rq_svec[0].iov_len += curlen;
328	}
329	r_xprt->rx_stats.pullup_copy_count += copy_len;
330
331	page_base = rqst->rq_snd_buf.page_base;
332	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
333	page_base &= ~PAGE_MASK;
334	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
335	for (i = 0; copy_len && i < npages; i++) {
336		curlen = PAGE_SIZE - page_base;
337		if (curlen > copy_len)
338			curlen = copy_len;
339		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
340			__func__, i, destp, copy_len, curlen);
341		srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
342		memcpy(destp, srcp+page_base, curlen);
343		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
344		rqst->rq_svec[0].iov_len += curlen;
345		destp += curlen;
346		copy_len -= curlen;
347		page_base = 0;
348	}
349	/* header now contains entire send message */
350	return pad;
351}
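
/*
 * Pullup sketch (hypothetical lengths): a send buffer with a 128-byte
 * head, 3000 bytes of page data and a 16-byte tail leaves the request
 * contiguous in the pre-registered buffer:
 *
 *   before: [head 128] [pages 3000] [tail 16]   (three regions)
 *   after:  [head 128 | pages 3000 | tail 16]   (rq_svec[0], len 3144)
 *
 * with pullup_copy_count growing by up to the 3016 copied bytes.
 */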
352
353/*
354 * Marshal a request: the primary job of this routine is to choose
355 * the transfer modes. See comments below.
356 *
357 * Uses multiple RDMA IOVs for a request:
358 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
359 *         preregistered buffer that already holds the RPC data in
360 *         its middle.
361 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
362 *  [2] -- optional padding.
363 *  [3] -- if padded, header only in [1] and data here.
364 */
365
366int
367rpcrdma_marshal_req(struct rpc_rqst *rqst)
368{
369	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
370	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
371	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
372	char *base;
373	size_t hdrlen, rpclen, padlen;
374	enum rpcrdma_chunktype rtype, wtype;
375	struct rpcrdma_msg *headerp;
376
377	/*
378	 * rpclen gets the amount of data in the first buffer, which is the
379	 * pre-registered buffer.
380	 */
381	base = rqst->rq_svec[0].iov_base;
382	rpclen = rqst->rq_svec[0].iov_len;
383
384	/* build RDMA header in private area at front */
385	headerp = (struct rpcrdma_msg *) req->rl_base;
386	/* don't htonl the XID; it's already done in the request */
387	headerp->rm_xid = rqst->rq_xid;
388	headerp->rm_vers = xdr_one;
389	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
390	headerp->rm_type = htonl(RDMA_MSG);
391
392	/*
393	 * Chunks needed for results?
394	 *
395	 * o If the expected result is under the inline threshold, all ops
396	 *   return as inline (but see later).
397	 * o Large non-read ops return as a single reply chunk.
398	 * o Large read ops return data as write chunk(s), header as inline.
399	 *
400	 * Note: the NFS code sending down multiple result segments implies
401 * the op is one of read, readdir[plus], readlink, or NFSv4 getacl.
402	 */
403
404	/*
405	 * This code can handle read chunks, write chunks OR reply
406	 * chunks -- only one type. If the request is too big to fit
407	 * inline, then we will choose read chunks. If the request is
408	 * a READ, then use write chunks to separate the file data
409	 * into pages; otherwise use reply chunks.
410	 */
411	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
412		wtype = rpcrdma_noch;
413	else if (rqst->rq_rcv_buf.page_len == 0)
414		wtype = rpcrdma_replych;
415	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
416		wtype = rpcrdma_writech;
417	else
418		wtype = rpcrdma_replych;
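
	/*
	 * For example (hypothetical): an NFS READ reply larger than the
	 * inline threshold has page data and XDRBUF_READ set, so its data
	 * returns as write chunks; a large READDIR reply has page data
	 * but no XDRBUF_READ, so it falls through to a reply chunk.
	 */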
419
420	/*
421	 * Chunks needed for arguments?
422	 *
423	 * o If the total request is under the inline threshold, all ops
424	 *   are sent as inline.
425	 * o Large non-write ops are sent with the entire message as a
426	 *   single read chunk (protocol 0-position special case).
427	 * o Large write ops transmit data as read chunk(s), header as
428	 *   inline.
429	 *
430	 * Note: the NFS code sending down multiple argument segments
431	 * implies the op is a write.
432	 * TBD check NFSv4 setacl
433	 */
434	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
435		rtype = rpcrdma_noch;
436	else if (rqst->rq_snd_buf.page_len == 0)
437		rtype = rpcrdma_areadch;
438	else
439		rtype = rpcrdma_readch;
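
	/*
	 * For example (hypothetical): a large NFS WRITE carries its data
	 * in pages, so the data moves as read chunk(s) with the header
	 * inline (rpcrdma_readch), while a large request with no page
	 * data goes whole as a single position-zero read chunk
	 * (rpcrdma_areadch).
	 */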
440
441	/* The following simplification is not true forever */
442	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
443		wtype = rpcrdma_noch;
444	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);
445
446	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
447	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
448		/* forced to "pure inline"? */
449		dprintk("RPC:       %s: too much data (%d/%d) for inline\n",
450			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
451		return -1;
452	}
453
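	/*
	 * 28 bytes: the 4 fixed header words (xid, vers, credit, type)
	 * plus the 3 empty chunk-list words of rm_nochunks, i.e.
	 * 7 XDR words * 4 bytes.
	 */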
454	hdrlen = 28; /*sizeof *headerp;*/
455	padlen = 0;
456
457	/*
458	 * Pull up any extra send data into the preregistered buffer.
459	 * When padding is in use and applies to the transfer, insert
460	 * it and change the message type.
461	 */
462	if (rtype == rpcrdma_noch) {
463
464		padlen = rpcrdma_inline_pullup(rqst,
465						RPCRDMA_INLINE_PAD_VALUE(rqst));
466
467		if (padlen) {
468			headerp->rm_type = htonl(RDMA_MSGP);
469			headerp->rm_body.rm_padded.rm_align =
470				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
471			headerp->rm_body.rm_padded.rm_thresh =
472				htonl(RPCRDMA_INLINE_PAD_THRESH);
473			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
474			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
475			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
476			hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
477			BUG_ON(wtype != rpcrdma_noch);
478
479		} else {
480			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
481			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
482			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
483			/* new length after pullup */
484			rpclen = rqst->rq_svec[0].iov_len;
485			/*
486			 * Currently we try not to actually use read inline.
487			 * Reply chunks have the desirable property that
488			 * they land, packed, directly in the target buffers
489			 * without headers, so they require no fixup. The
490			 * additional RDMA Write op sends the same amount
491			 * of data, streams on the wire, and adds no overhead
492			 * on receive. Therefore, we request a reply chunk
493			 * for non-writes wherever feasible and efficient.
494			 */
495			if (wtype == rpcrdma_noch &&
496			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
497				wtype = rpcrdma_replych;
498		}
499	}
500
501	/*
502	 * Marshal chunks. This routine will return the header length
503	 * consumed by marshaling.
504	 */
505	if (rtype != rpcrdma_noch) {
506		hdrlen = rpcrdma_create_chunks(rqst,
507					&rqst->rq_snd_buf, headerp, rtype);
508		wtype = rtype;	/* simplify dprintk */
509
510	} else if (wtype != rpcrdma_noch) {
511		hdrlen = rpcrdma_create_chunks(rqst,
512					&rqst->rq_rcv_buf, headerp, wtype);
513	}
514
515	if (hdrlen == 0)
516		return -1;
517
518	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
519		" headerp 0x%p base 0x%p lkey 0x%x\n",
520		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
521		headerp, base, req->rl_iov.lkey);
522
523	/*
524	 * initialize the send iovs - normally only two: the RDMA header and a
525	 * single preregistered RPC header buffer, but if padding is present,
526	 * then use a preregistered (and zeroed) pad buffer between the RPC
527	 * header and any write data. In all non-rdma cases, any following
528	 * data has been copied into the RPC header buffer.
529	 */
530	req->rl_send_iov[0].addr = req->rl_iov.addr;
531	req->rl_send_iov[0].length = hdrlen;
532	req->rl_send_iov[0].lkey = req->rl_iov.lkey;
533
534	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
535	req->rl_send_iov[1].length = rpclen;
536	req->rl_send_iov[1].lkey = req->rl_iov.lkey;
537
538	req->rl_niovs = 2;
539
540	if (padlen) {
541		struct rpcrdma_ep *ep = &r_xprt->rx_ep;
542
543		req->rl_send_iov[2].addr = ep->rep_pad.addr;
544		req->rl_send_iov[2].length = padlen;
545		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;
546
547		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
548		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
549		req->rl_send_iov[3].lkey = req->rl_iov.lkey;
550
551		req->rl_niovs = 4;
552	}
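
	/*
	 * Concrete layout (hypothetical lengths): a padded message with
	 * hdrlen 36, rpclen 128 and padlen 4 posts four send iovs:
	 * [0] the 36-byte RDMA_MSGP header, [1] the 128-byte RPC header,
	 * [2] the 4-byte zeroed pad buffer, and [3] the remaining
	 * rq_slen - 128 bytes of write data.
	 */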
553
554	return 0;
555}
556
557/*
558 * Chase down a received write or reply chunklist to get the length
559 * RDMA'd by the server. See the map at rpcrdma_create_chunks()! :-)
560 */
561static int
562rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
563{
564	unsigned int i, total_len;
565	struct rpcrdma_write_chunk *cur_wchunk;
566
567	i = ntohl(**iptrp);	/* get array count */
568	if (i > max)
569		return -1;
570	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
571	total_len = 0;
572	while (i--) {
573		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
574		ifdebug(FACILITY) {
575			u64 off;
576			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
577			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
578				__func__,
579				ntohl(seg->rs_length),
580				(unsigned long long)off,
581				ntohl(seg->rs_handle));
582		}
583		total_len += ntohl(seg->rs_length);
584		++cur_wchunk;
585	}
586	/* check and adjust for properly terminated write chunk */
587	if (wrchunk) {
588		__be32 *w = (__be32 *) cur_wchunk;
589		if (*w++ != xdr_zero)
590			return -1;
591		cur_wchunk = (struct rpcrdma_write_chunk *) w;
592	}
593	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
594		return -1;
595
596	*iptrp = (__be32 *) cur_wchunk;
597	return total_len;
598}
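
/*
 * Example (hypothetical): a write list of two chunks of 4096 and 1024
 * bytes arrives as the XDR words 2, HLOO, HLOO, 0 at *iptrp; with
 * wrchunk set, the function verifies the terminating zero, advances
 * *iptrp past it, and returns 5120.
 */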
599
600/*
601 * Scatter inline received data back into the provided iovecs.
602 */
603static void
604rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
605{
606	int i, npages, curlen, olen;
607	char *destp;
608	struct page **ppages;
609	int page_base;
610
611	curlen = rqst->rq_rcv_buf.head[0].iov_len;
612	if (curlen > copy_len) {	/* write chunk header fixup */
613		curlen = copy_len;
614		rqst->rq_rcv_buf.head[0].iov_len = curlen;
615	}
616
617	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
618		__func__, srcp, copy_len, curlen);
619
620	/* Shift pointer for first receive segment only */
621	rqst->rq_rcv_buf.head[0].iov_base = srcp;
622	srcp += curlen;
623	copy_len -= curlen;
624
625	olen = copy_len;
626	i = 0;
627	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
628	page_base = rqst->rq_rcv_buf.page_base;
629	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
630	page_base &= ~PAGE_MASK;
631
632	if (copy_len && rqst->rq_rcv_buf.page_len) {
633		npages = PAGE_ALIGN(page_base +
634			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
635		for (; i < npages; i++) {
636			curlen = PAGE_SIZE - page_base;
637			if (curlen > copy_len)
638				curlen = copy_len;
639			dprintk("RPC:       %s: page %d"
640				" srcp 0x%p len %d curlen %d\n",
641				__func__, i, srcp, copy_len, curlen);
642			destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
643			memcpy(destp + page_base, srcp, curlen);
644			flush_dcache_page(ppages[i]);
645			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
646			srcp += curlen;
647			copy_len -= curlen;
648			if (copy_len == 0)
649				break;
650			page_base = 0;
651		}
652		rqst->rq_rcv_buf.page_len = olen - copy_len;
653	} else
654		rqst->rq_rcv_buf.page_len = 0;
655
656	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
657		curlen = copy_len;
658		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
659			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
660		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
661			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
662		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
663			__func__, srcp, copy_len, curlen);
664		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
665		copy_len -= curlen; ++i;
666	} else
667		rqst->rq_rcv_buf.tail[0].iov_len = 0;
668
669	if (pad) {
670		/* implicit padding on terminal chunk */
671		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
672		while (pad--)
673			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
674	}
675
676	if (copy_len)
677		dprintk("RPC:       %s: %d bytes in"
678			" %d extra segments (%d lost)\n",
679			__func__, olen, i, copy_len);
680
681	/* TBD avoid a warning from call_decode() */
682	rqst->rq_private_buf = rqst->rq_rcv_buf;
683}
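
/*
 * Fixup example (hypothetical lengths): a 2200-byte inline reply into
 * an rcv_buf with a 200-byte head and ample page space: head[0] is
 * pointed at srcp and keeps its 200 bytes, the next 2000 bytes are
 * copied into the page list, the tail length becomes 0, and
 * fixup_copy_count grows by 2000.
 */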
684
685/*
686 * This function is called when an async event is posted to
687 * the connection that changes the connection state. All it
688 * does at this point is mark the connection up or down; the RPC
689 * timers do the rest.
690 */
691void
692rpcrdma_conn_func(struct rpcrdma_ep *ep)
693{
694	struct rpc_xprt *xprt = ep->rep_xprt;
695
696	spin_lock_bh(&xprt->transport_lock);
697	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
698		++xprt->connect_cookie;
699	if (ep->rep_connected > 0) {
700		if (!xprt_test_and_set_connected(xprt))
701			xprt_wake_pending_tasks(xprt, 0);
702	} else {
703		if (xprt_test_and_clear_connected(xprt))
704			xprt_wake_pending_tasks(xprt, -ENOTCONN);
705	}
706	spin_unlock_bh(&xprt->transport_lock);
707}
708
709/*
710 * This function is called when a memory window unbind we are waiting
711 * for completes. Just use rr_func (zeroed by the upcall) to signal completion.
712 */
713static void
714rpcrdma_unbind_func(struct rpcrdma_rep *rep)
715{
716	wake_up(&rep->rr_unbind);
717}
718
719/*
720 * Called as a tasklet to do req/reply match and complete a request.
721 * Errors must result in the RPC task either being awakened, or
722 * allowed to time out, to discover the errors at that time.
723 */
724void
725rpcrdma_reply_handler(struct rpcrdma_rep *rep)
726{
727	struct rpcrdma_msg *headerp;
728	struct rpcrdma_req *req;
729	struct rpc_rqst *rqst;
730	struct rpc_xprt *xprt = rep->rr_xprt;
731	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
732	__be32 *iptr;
733	int i, rdmalen, status;
734
735	/* Check status. If bad, signal disconnect and return rep to pool */
736	if (rep->rr_len == ~0U) {
737		rpcrdma_recv_buffer_put(rep);
738		if (r_xprt->rx_ep.rep_connected == 1) {
739			r_xprt->rx_ep.rep_connected = -EIO;
740			rpcrdma_conn_func(&r_xprt->rx_ep);
741		}
742		return;
743	}
744	if (rep->rr_len < 28) {
745		dprintk("RPC:       %s: short/invalid reply\n", __func__);
746		goto repost;
747	}
748	headerp = (struct rpcrdma_msg *) rep->rr_base;
749	if (headerp->rm_vers != xdr_one) {
750		dprintk("RPC:       %s: invalid version %d\n",
751			__func__, ntohl(headerp->rm_vers));
752		goto repost;
753	}
754
755	/* Get XID and try for a match. */
756	spin_lock(&xprt->transport_lock);
757	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
758	if (rqst == NULL) {
759		spin_unlock(&xprt->transport_lock);
760		dprintk("RPC:       %s: reply 0x%p failed "
761			"to match any request xid 0x%08x len %d\n",
762			__func__, rep, headerp->rm_xid, rep->rr_len);
763repost:
764		r_xprt->rx_stats.bad_reply_count++;
765		rep->rr_func = rpcrdma_reply_handler;
766		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
767			rpcrdma_recv_buffer_put(rep);
768
769		return;
770	}
771
772	/* get request object */
773	req = rpcr_to_rdmar(rqst);
774
775	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
776		"                   RPC request 0x%p xid 0x%08x\n",
777			__func__, rep, req, rqst, headerp->rm_xid);
778
779	BUG_ON(!req || req->rl_reply);
780
781	/* from here on, the reply is no longer an orphan */
782	req->rl_reply = rep;
783
784	/* check for expected message types */
785	/* The order of some of these tests is important. */
786	switch (headerp->rm_type) {
787	case htonl(RDMA_MSG):
788		/* never expect read chunks */
789		/* never expect reply chunks (two ways to check) */
790		/* never expect write chunks without having offered RDMA */
791		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
792		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
793		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
794		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
795		     req->rl_nchunks == 0))
796			goto badheader;
797		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
798			/* count any expected write chunks in read reply */
799			/* start at write chunk array count */
800			iptr = &headerp->rm_body.rm_chunks[2];
801			rdmalen = rpcrdma_count_chunks(rep,
802						req->rl_nchunks, 1, &iptr);
803			/* check for validity, and no reply chunk after */
804			if (rdmalen < 0 || *iptr++ != xdr_zero)
805				goto badheader;
806			rep->rr_len -=
807			    ((unsigned char *)iptr - (unsigned char *)headerp);
808			status = rep->rr_len + rdmalen;
809			r_xprt->rx_stats.total_rdma_reply += rdmalen;
810			/* special case - last chunk may omit padding */
811			if (rdmalen &= 3) {
812				rdmalen = 4 - rdmalen;
813				status += rdmalen;
814			}
815		} else {
816			/* else ordinary inline */
817			rdmalen = 0;
818			iptr = (__be32 *)((unsigned char *)headerp + 28);
819			rep->rr_len -= 28; /*sizeof *headerp;*/
820			status = rep->rr_len;
821		}
822		/* Fix up the rpc results for upper layer */
823		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
824		break;
825
826	case htonl(RDMA_NOMSG):
827		/* never expect read or write chunks, always reply chunks */
828		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
829		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
830		    headerp->rm_body.rm_chunks[2] != xdr_one ||
831		    req->rl_nchunks == 0)
832			goto badheader;
833		iptr = (__be32 *)((unsigned char *)headerp + 28);
834		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
835		if (rdmalen < 0)
836			goto badheader;
837		r_xprt->rx_stats.total_rdma_reply += rdmalen;
838		/* Reply chunk buffer already is the reply vector - no fixup. */
839		status = rdmalen;
840		break;
841
842badheader:
843	default:
844		dprintk("%s: invalid rpcrdma reply header (type %d):"
845				" chunks[012] == %d %d %d"
846				" expected chunks <= %d\n",
847				__func__, ntohl(headerp->rm_type),
848				headerp->rm_body.rm_chunks[0],
849				headerp->rm_body.rm_chunks[1],
850				headerp->rm_body.rm_chunks[2],
851				req->rl_nchunks);
852		status = -EIO;
853		r_xprt->rx_stats.bad_reply_count++;
854		break;
855	}
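
	/*
	 * Arithmetic note for the RDMA_MSG case above (hypothetical
	 * numbers): 120 inline bytes after the header plus a 4093-byte
	 * write chunk gives status 4213; the XDR round-up then adds
	 * 4 - (4093 & 3) = 3 implicit pad bytes, so status becomes 4216.
	 */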
856
857	/* If using mw bind, start the deregister process now. */
858	/* (Note: if mr_free() is needed, it cannot be done here, in tasklet context) */
859	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
860	case RPCRDMA_MEMWINDOWS:
861		for (i = 0; req->rl_nchunks-- > 1;)
862			i += rpcrdma_deregister_external(
863				&req->rl_segments[i], r_xprt, NULL);
864		/* Optionally wait (not here) for unbinds to complete */
865		rep->rr_func = rpcrdma_unbind_func;
866		(void) rpcrdma_deregister_external(&req->rl_segments[i],
867						   r_xprt, rep);
868		break;
869	case RPCRDMA_MEMWINDOWS_ASYNC:
870		for (i = 0; req->rl_nchunks--;)
871			i += rpcrdma_deregister_external(&req->rl_segments[i],
872							 r_xprt, NULL);
873		break;
874	default:
875		break;
876	}
877
878	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
879			__func__, xprt, rqst, status);
880	xprt_complete_rqst(rqst->rq_task, status);
881	spin_unlock(&xprt->transport_lock);
882}