/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}
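
/* Worked example, for illustration only (assumes the usual XDR item
 * sizes: a 28-byte fixed header, 16-byte HLOO segments, and 24-byte
 * Read chunk entries): a transport with ri_max_segs = 8 computes a
 * largest-possible Call header of 28 + (8 + 2) * 24 + (4 + 16 + 4)
 * = 292 bytes. That amount is deducted from the inline threshold
 * to arrive at ri_max_inline_write.
 */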

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = xdr->page_base & ~PAGE_MASK;
		count = 0;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
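
/* Example of the SGE accounting above (assuming 4096-byte pages): a
 * page list of 10000 bytes starting 1000 bytes into its first page
 * covers byte runs of 3096, 4096, and 2808, so the payload needs
 * three Send SGEs in addition to those for the header and the head
 * iovec. If that count exceeds ri_max_send_sges, the call must use
 * a Read chunk instead.
 */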

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < RPCRDMA_MAX_SEGS) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
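
/* For instance (again assuming 4096-byte pages), a 6000-byte kvec
 * whose base address falls 3000 bytes into a page is converted into
 * three segments of 1096, 4096, and 808 bytes, each confined to one
 * page so that page-based registration modes such as FMR can handle
 * it.
 */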

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	int len, n, p, page_base;
	struct page **ppages;

	n = 0;
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < RPCRDMA_MAX_SEGS) {
		if (!ppages[p]) {
			/* allocate a page for the receive buffer's page list */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -EAGAIN;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			goto out_overflow;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == RPCRDMA_MAX_SEGS)
		goto out_overflow;

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	return n;

out_overflow:
	pr_err("rpcrdma: segment array overflow\n");
	return -EIO;
}

static inline __be32 *
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	return xdr_encode_hyper(iptr, mw->mw_offset);
}
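
/* Each segment is marshaled as four XDR words ("HLOO"): a 32-bit
 * handle (the MR's rkey), a 32-bit length, and a 64-bit offset
 * encoded as an XDR hyper -- 16 bytes on the wire. The returned
 * pointer is the next free encoding position in the header buffer.
 */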

/* XDR-encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Read list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
			 __be32 *iptr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int n, nsegs;

	if (rtype == rpcrdma_noch) {
		*iptr++ = xdr_zero;	/* item not present */
		return iptr;
	}

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 false, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		*iptr++ = xdr_one;	/* item present */

		/* All read segments in this chunk
		 * have the same "position".
		 */
		*iptr++ = cpu_to_be32(pos);
		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Finish Read list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Write list. Supports encoding a list containing
 * one array of plain segments that belong to a single write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Write list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, __be32 *iptr,
			  enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech) {
		*iptr++ = xdr_zero;	/* no Write list present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Write list present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	/* Finish Write list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Reply chunk. Supports encoding an array of plain
 * segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Reply chunk, or an error pointer.
 */
static __be32 *
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
			   __be32 *iptr, enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		*iptr++ = xdr_zero;	/* no Reply chunk present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Reply chunk present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return iptr;
}

/* Prepare the RPC-over-RDMA header SGE.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
		if (!__rpcrdma_dma_map_regbuf(ia, rb))
			return false;
		sge->addr = rdmab_addr(rb);
		sge->lkey = rdmab_lkey(rb);
	}
	sge->length = len;

	ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
				      sge->length, DMA_TO_DEVICE);
	req->rl_send_wr.num_sge++;
	return true;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = req->rl_send_sge;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		return false;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(device, sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			req->rl_mapped_sges++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		req->rl_mapped_sges++;
	}

out:
	req->rl_send_wr.num_sge = sge_no + 1;
	return true;

out_mapping_overflow:
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}
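
/* After rpcrdma_prepare_msg_sges() the Send SGE array looks like:
 *
 *   sge[0]    - RPC-over-RDMA header (see rpcrdma_prepare_hdr_sge)
 *   sge[1]    - head iovec, backed by the persistently mapped
 *               rl_sendbuf
 *   sge[2..n] - one SGE per page of the page list, if present
 *   sge[last] - tail iovec, if present
 *
 * Only the page list and tail entries are DMA-mapped here, and they
 * are counted in rl_mapped_sges.
 */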

bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			  u32 hdrlen, struct xdr_buf *xdr,
			  enum rpcrdma_chunktype rtype)
{
	req->rl_send_wr.num_sge = 0;
	req->rl_mapped_sges = 0;

	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
		goto out_map;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
			goto out_map;

	return true;

out_map:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge;
	int count;

	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(device, sge->addr, sge->length,
				  DMA_TO_DEVICE);
	req->rl_mapped_sges = 0;
}
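
/* Unmapping starts at rl_send_sge[2]: sge[0] (the header) and
 * sge[1] (the head iovec) are backed by persistently mapped
 * regbufs, so only the page list and tail SGEs that were mapped by
 * rpcrdma_prepare_msg_sges() need to be unmapped here.
 */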

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;
	bool ddp_allowed;
	ssize_t hdrlen;
	size_t rpclen;
	__be32 *iptr;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		rtype = rpcrdma_noch;
		rpclen = rqst->rq_snd_buf.len;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
		rpclen = rqst->rq_snd_buf.head[0].iov_len +
			 rqst->rq_snd_buf.tail[0].iov_len;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = rdma_nomsg;
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	iptr = headerp->rm_body.rm_chunks;
	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		hdrlen, rpclen);

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
				       &rqst->rq_snd_buf, rtype)) {
		iptr = ERR_PTR(-EIO);
		goto out_unmap;
	}
	return 0;

out_unmap:
	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
	return PTR_ERR(iptr);
}
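
/* For a request that needs no chunks at all (rtype and wtype both
 * rpcrdma_noch), the marshaled header is just RPCRDMA_HDRLEN_MIN:
 * the four fixed words followed by three empty chunk lists,
 *
 *   xid | vers | credits | rdma_msg | 0 | 0 | 0
 *
 * with the RPC call message carried inline in the Send buffer.
 */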

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See the encoding key above
 * rpcrdma_encode_write_list().
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
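
/* Example: if the reply carries 1000 bytes of inline content and
 * head.iov_len is 120, the head iovec is pointed at the first 120
 * bytes of the receive buffer (no copy), up to page_len bytes of
 * what follows are memcopied into the page list (and counted in
 * fixup_copy_count), and the tail iovec is pointed at whatever
 * remains. Only the page-list bytes are actually copied.
 */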

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

	/* sanity */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
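
/* The two final tests above rely on the fixed header layout: an
 * RPC-over-RDMA header with empty chunk lists is seven XDR words,
 * so p[7] is the first word of the embedded RPC message (its XID,
 * which matches rm_xid for a backchannel call) and p[8] is the RPC
 * call direction field, which must be RPC_CALL.
 */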
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status, rmerr;
	unsigned long cwnd;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     list_empty(&req->rl_registered)))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}

		r_xprt->rx_stats.fixup_copy_count +=
			rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
					     rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    list_empty(&req->rl_registered))
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	case rdma_error:
		goto out_rdmaerr;

badheader:
	default:
		dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpu(headerp->rm_type));
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

out:
	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	spin_lock_bh(&xprt->transport_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_rdmaerr:
	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
	switch (rmerr) {
	case ERR_VERS:
		pr_err("%s: server reports header version error (%u-%u)\n",
		       __func__,
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
		break;
	case ERR_CHUNK:
		pr_err("%s: server reports header decoding error\n",
		       __func__);
		break;
	default:
		pr_err("%s: server reports unknown error %d\n",
		       __func__, rmerr);
	}
	status = -EREMOTEIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}
1/*
2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * rpc_rdma.c
42 *
43 * This file contains the guts of the RPC RDMA protocol, and
44 * does marshaling/unmarshaling, etc. It is also where interfacing
45 * to the Linux RPC framework lives.
46 */
47
48#include "xprt_rdma.h"
49
50#include <linux/highmem.h>
51
52#ifdef RPC_DEBUG
53# define RPCDBG_FACILITY RPCDBG_TRANS
54#endif
55
56enum rpcrdma_chunktype {
57 rpcrdma_noch = 0,
58 rpcrdma_readch,
59 rpcrdma_areadch,
60 rpcrdma_writech,
61 rpcrdma_replych
62};
63
64#ifdef RPC_DEBUG
65static const char transfertypes[][12] = {
66 "pure inline", /* no chunks */
67 " read chunk", /* some argument via rdma read */
68 "*read chunk", /* entire request via rdma read */
69 "write chunk", /* some result via rdma write */
70 "reply chunk" /* entire reply via rdma write */
71};
72#endif
73
74/*
75 * Chunk assembly from upper layer xdr_buf.
76 *
77 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
78 * elements. Segments are then coalesced when registered, if possible
79 * within the selected memreg mode.
80 *
81 * Note, this routine is never called if the connection's memory
82 * registration strategy is 0 (bounce buffers).
83 */
84
85static int
86rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
87 enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
88{
89 int len, n = 0, p;
90 int page_base;
91 struct page **ppages;
92
93 if (pos == 0 && xdrbuf->head[0].iov_len) {
94 seg[n].mr_page = NULL;
95 seg[n].mr_offset = xdrbuf->head[0].iov_base;
96 seg[n].mr_len = xdrbuf->head[0].iov_len;
97 ++n;
98 }
99
100 len = xdrbuf->page_len;
101 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
102 page_base = xdrbuf->page_base & ~PAGE_MASK;
103 p = 0;
104 while (len && n < nsegs) {
105 seg[n].mr_page = ppages[p];
106 seg[n].mr_offset = (void *)(unsigned long) page_base;
107 seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
108 BUG_ON(seg[n].mr_len > PAGE_SIZE);
109 len -= seg[n].mr_len;
110 ++n;
111 ++p;
112 page_base = 0; /* page offset only applies to first page */
113 }
114
115 /* Message overflows the seg array */
116 if (len && n == nsegs)
117 return 0;
118
119 if (xdrbuf->tail[0].iov_len) {
120 /* the rpcrdma protocol allows us to omit any trailing
121 * xdr pad bytes, saving the server an RDMA operation. */
122 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
123 return n;
124 if (n == nsegs)
125 /* Tail remains, but we're out of segments */
126 return 0;
127 seg[n].mr_page = NULL;
128 seg[n].mr_offset = xdrbuf->tail[0].iov_base;
129 seg[n].mr_len = xdrbuf->tail[0].iov_len;
130 ++n;
131 }
132
133 return n;
134}
135
136/*
137 * Create read/write chunk lists, and reply chunks, for RDMA
138 *
139 * Assume check against THRESHOLD has been done, and chunks are required.
140 * Assume only encoding one list entry for read|write chunks. The NFSv3
141 * protocol is simple enough to allow this as it only has a single "bulk
142 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
143 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
144 *
145 * When used for a single reply chunk (which is a special write
146 * chunk used for the entire reply, rather than just the data), it
147 * is used primarily for READDIR and READLINK which would otherwise
148 * be severely size-limited by a small rdma inline read max. The server
149 * response will come back as an RDMA Write, followed by a message
150 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
151 * chunks do not provide data alignment, however they do not require
152 * "fixup" (moving the response to the upper layer buffer) either.
153 *
154 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
155 *
156 * Read chunklist (a linked list):
157 * N elements, position P (same P for all chunks of same arg!):
158 * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
159 *
160 * Write chunklist (a list of (one) counted array):
161 * N elements:
162 * 1 - N - HLOO - HLOO - ... - HLOO - 0
163 *
164 * Reply chunk (a counted array):
165 * N elements:
166 * 1 - N - HLOO - HLOO - ... - HLOO
167 */
168
169static unsigned int
170rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
171 struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
172{
173 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
174 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
175 int nsegs, nchunks = 0;
176 unsigned int pos;
177 struct rpcrdma_mr_seg *seg = req->rl_segments;
178 struct rpcrdma_read_chunk *cur_rchunk = NULL;
179 struct rpcrdma_write_array *warray = NULL;
180 struct rpcrdma_write_chunk *cur_wchunk = NULL;
181 __be32 *iptr = headerp->rm_body.rm_chunks;
182
183 if (type == rpcrdma_readch || type == rpcrdma_areadch) {
184 /* a read chunk - server will RDMA Read our memory */
185 cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
186 } else {
187 /* a write or reply chunk - server will RDMA Write our memory */
188 *iptr++ = xdr_zero; /* encode a NULL read chunk list */
189 if (type == rpcrdma_replych)
190 *iptr++ = xdr_zero; /* a NULL write chunk list */
191 warray = (struct rpcrdma_write_array *) iptr;
192 cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
193 }
194
195 if (type == rpcrdma_replych || type == rpcrdma_areadch)
196 pos = 0;
197 else
198 pos = target->head[0].iov_len;
199
200 nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
201 if (nsegs == 0)
202 return 0;
203
204 do {
205 /* bind/register the memory, then build chunk from result. */
206 int n = rpcrdma_register_external(seg, nsegs,
207 cur_wchunk != NULL, r_xprt);
208 if (n <= 0)
209 goto out;
210 if (cur_rchunk) { /* read */
211 cur_rchunk->rc_discrim = xdr_one;
212 /* all read chunks have the same "position" */
213 cur_rchunk->rc_position = htonl(pos);
214 cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
215 cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
216 xdr_encode_hyper(
217 (__be32 *)&cur_rchunk->rc_target.rs_offset,
218 seg->mr_base);
219 dprintk("RPC: %s: read chunk "
220 "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
221 seg->mr_len, (unsigned long long)seg->mr_base,
222 seg->mr_rkey, pos, n < nsegs ? "more" : "last");
223 cur_rchunk++;
224 r_xprt->rx_stats.read_chunk_count++;
225 } else { /* write/reply */
226 cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
227 cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
228 xdr_encode_hyper(
229 (__be32 *)&cur_wchunk->wc_target.rs_offset,
230 seg->mr_base);
231 dprintk("RPC: %s: %s chunk "
232 "elem %d@0x%llx:0x%x (%s)\n", __func__,
233 (type == rpcrdma_replych) ? "reply" : "write",
234 seg->mr_len, (unsigned long long)seg->mr_base,
235 seg->mr_rkey, n < nsegs ? "more" : "last");
236 cur_wchunk++;
237 if (type == rpcrdma_replych)
238 r_xprt->rx_stats.reply_chunk_count++;
239 else
240 r_xprt->rx_stats.write_chunk_count++;
241 r_xprt->rx_stats.total_rdma_request += seg->mr_len;
242 }
243 nchunks++;
244 seg += n;
245 nsegs -= n;
246 } while (nsegs);
247
248 /* success. all failures return above */
249 req->rl_nchunks = nchunks;
250
251 BUG_ON(nchunks == 0);
252 BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
253 && (nchunks > 3));
254
255 /*
256 * finish off header. If write, marshal discrim and nchunks.
257 */
258 if (cur_rchunk) {
259 iptr = (__be32 *) cur_rchunk;
260 *iptr++ = xdr_zero; /* finish the read chunk list */
261 *iptr++ = xdr_zero; /* encode a NULL write chunk list */
262 *iptr++ = xdr_zero; /* encode a NULL reply chunk */
263 } else {
264 warray->wc_discrim = xdr_one;
265 warray->wc_nchunks = htonl(nchunks);
266 iptr = (__be32 *) cur_wchunk;
267 if (type == rpcrdma_writech) {
268 *iptr++ = xdr_zero; /* finish the write chunk list */
269 *iptr++ = xdr_zero; /* encode a NULL reply chunk */
270 }
271 }
272
273 /*
274 * Return header size.
275 */
276 return (unsigned char *)iptr - (unsigned char *)headerp;
277
278out:
279 for (pos = 0; nchunks--;)
280 pos += rpcrdma_deregister_external(
281 &req->rl_segments[pos], r_xprt, NULL);
282 return 0;
283}
284
285/*
286 * Copy write data inline.
287 * This function is used for "small" requests. Data which is passed
288 * to RPC via iovecs (or page list) is copied directly into the
289 * pre-registered memory buffer for this request. For small amounts
290 * of data, this is efficient. The cutoff value is tunable.
291 */
292static int
293rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
294{
295 int i, npages, curlen;
296 int copy_len;
297 unsigned char *srcp, *destp;
298 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
299 int page_base;
300 struct page **ppages;
301
302 destp = rqst->rq_svec[0].iov_base;
303 curlen = rqst->rq_svec[0].iov_len;
304 destp += curlen;
305 /*
306 * Do optional padding where it makes sense. Alignment of write
307 * payload can help the server, if our setting is accurate.
308 */
309 pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
310 if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
311 pad = 0; /* don't pad this request */
312
313 dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
314 __func__, pad, destp, rqst->rq_slen, curlen);
315
316 copy_len = rqst->rq_snd_buf.page_len;
317
318 if (rqst->rq_snd_buf.tail[0].iov_len) {
319 curlen = rqst->rq_snd_buf.tail[0].iov_len;
320 if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
321 memmove(destp + copy_len,
322 rqst->rq_snd_buf.tail[0].iov_base, curlen);
323 r_xprt->rx_stats.pullup_copy_count += curlen;
324 }
325 dprintk("RPC: %s: tail destp 0x%p len %d\n",
326 __func__, destp + copy_len, curlen);
327 rqst->rq_svec[0].iov_len += curlen;
328 }
329 r_xprt->rx_stats.pullup_copy_count += copy_len;
330
331 page_base = rqst->rq_snd_buf.page_base;
332 ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
333 page_base &= ~PAGE_MASK;
334 npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
335 for (i = 0; copy_len && i < npages; i++) {
336 curlen = PAGE_SIZE - page_base;
337 if (curlen > copy_len)
338 curlen = copy_len;
339 dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
340 __func__, i, destp, copy_len, curlen);
341 srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
342 memcpy(destp, srcp+page_base, curlen);
343 kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
344 rqst->rq_svec[0].iov_len += curlen;
345 destp += curlen;
346 copy_len -= curlen;
347 page_base = 0;
348 }
349 /* header now contains entire send message */
350 return pad;
351}
352
353/*
354 * Marshal a request: the primary job of this routine is to choose
355 * the transfer modes. See comments below.
356 *
357 * Uses multiple RDMA IOVs for a request:
358 * [0] -- RPC RDMA header, which uses memory from the *start* of the
359 * preregistered buffer that already holds the RPC data in
360 * its middle.
361 * [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
362 * [2] -- optional padding.
363 * [3] -- if padded, header only in [1] and data here.
364 */
365
366int
367rpcrdma_marshal_req(struct rpc_rqst *rqst)
368{
369 struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
370 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
371 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
372 char *base;
373 size_t hdrlen, rpclen, padlen;
374 enum rpcrdma_chunktype rtype, wtype;
375 struct rpcrdma_msg *headerp;
376
377 /*
378 * rpclen gets amount of data in first buffer, which is the
379 * pre-registered buffer.
380 */
381 base = rqst->rq_svec[0].iov_base;
382 rpclen = rqst->rq_svec[0].iov_len;
383
384 /* build RDMA header in private area at front */
385 headerp = (struct rpcrdma_msg *) req->rl_base;
386 /* don't htonl XID, it's already done in request */
387 headerp->rm_xid = rqst->rq_xid;
388 headerp->rm_vers = xdr_one;
389 headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
390 headerp->rm_type = htonl(RDMA_MSG);
391
392 /*
393 * Chunks needed for results?
394 *
395 * o If the expected result is under the inline threshold, all ops
396 * return as inline (but see later).
397 * o Large non-read ops return as a single reply chunk.
398 * o Large read ops return data as write chunk(s), header as inline.
399 *
400 * Note: the NFS code sending down multiple result segments implies
401 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
402 */
403
404 /*
405 * This code can handle read chunks, write chunks OR reply
406 * chunks -- only one type. If the request is too big to fit
407 * inline, then we will choose read chunks. If the request is
408 * a READ, then use write chunks to separate the file data
409 * into pages; otherwise use reply chunks.
410 */
411 if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
412 wtype = rpcrdma_noch;
413 else if (rqst->rq_rcv_buf.page_len == 0)
414 wtype = rpcrdma_replych;
415 else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
416 wtype = rpcrdma_writech;
417 else
418 wtype = rpcrdma_replych;
419
420 /*
421 * Chunks needed for arguments?
422 *
423 * o If the total request is under the inline threshold, all ops
424 * are sent as inline.
425 * o Large non-write ops are sent with the entire message as a
426 * single read chunk (protocol 0-position special case).
427 * o Large write ops transmit data as read chunk(s), header as
428 * inline.
429 *
430 * Note: the NFS code sending down multiple argument segments
431 * implies the op is a write.
432 * TBD check NFSv4 setacl
433 */
434 if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
435 rtype = rpcrdma_noch;
436 else if (rqst->rq_snd_buf.page_len == 0)
437 rtype = rpcrdma_areadch;
438 else
439 rtype = rpcrdma_readch;
440
441 /* The following simplification is not true forever */
442 if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
443 wtype = rpcrdma_noch;
444 BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);
445
446 if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
447 (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
448 /* forced to "pure inline"? */
449 dprintk("RPC: %s: too much data (%d/%d) for inline\n",
450 __func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
451 return -1;
452 }
453
454 hdrlen = 28; /*sizeof *headerp;*/
455 padlen = 0;
456
457 /*
458 * Pull up any extra send data into the preregistered buffer.
459 * When padding is in use and applies to the transfer, insert
460 * it and change the message type.
461 */
462 if (rtype == rpcrdma_noch) {
463
464 padlen = rpcrdma_inline_pullup(rqst,
465 RPCRDMA_INLINE_PAD_VALUE(rqst));
466
467 if (padlen) {
468 headerp->rm_type = htonl(RDMA_MSGP);
469 headerp->rm_body.rm_padded.rm_align =
470 htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
471 headerp->rm_body.rm_padded.rm_thresh =
472 htonl(RPCRDMA_INLINE_PAD_THRESH);
473 headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
474 headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
475 headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
476 hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
477 BUG_ON(wtype != rpcrdma_noch);
478
479 } else {
480 headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
481 headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
482 headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
483 /* new length after pullup */
484 rpclen = rqst->rq_svec[0].iov_len;
485 /*
486 * Currently we try to not actually use read inline.
487 * Reply chunks have the desirable property that
488 * they land, packed, directly in the target buffers
489 * without headers, so they require no fixup. The
490 * additional RDMA Write op sends the same amount
491 * of data, streams on-the-wire and adds no overhead
492 * on receive. Therefore, we request a reply chunk
493 * for non-writes wherever feasible and efficient.
494 */
495 if (wtype == rpcrdma_noch &&
496 r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
497 wtype = rpcrdma_replych;
498 }
499 }
500
501 /*
502 * Marshal chunks. This routine will return the header length
503 * consumed by marshaling.
504 */
505 if (rtype != rpcrdma_noch) {
506 hdrlen = rpcrdma_create_chunks(rqst,
507 &rqst->rq_snd_buf, headerp, rtype);
508 wtype = rtype; /* simplify dprintk */
509
510 } else if (wtype != rpcrdma_noch) {
511 hdrlen = rpcrdma_create_chunks(rqst,
512 &rqst->rq_rcv_buf, headerp, wtype);
513 }
514
515 if (hdrlen == 0)
516 return -1;
517
518 dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
519 " headerp 0x%p base 0x%p lkey 0x%x\n",
520 __func__, transfertypes[wtype], hdrlen, rpclen, padlen,
521 headerp, base, req->rl_iov.lkey);
522
523 /*
524 * initialize send_iov's - normally only two: rdma chunk header and
525 * single preregistered RPC header buffer, but if padding is present,
526 * then use a preregistered (and zeroed) pad buffer between the RPC
527 * header and any write data. In all non-rdma cases, any following
528 * data has been copied into the RPC header buffer.
529 */
530 req->rl_send_iov[0].addr = req->rl_iov.addr;
531 req->rl_send_iov[0].length = hdrlen;
532 req->rl_send_iov[0].lkey = req->rl_iov.lkey;
533
534 req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
535 req->rl_send_iov[1].length = rpclen;
536 req->rl_send_iov[1].lkey = req->rl_iov.lkey;
537
538 req->rl_niovs = 2;
539
540 if (padlen) {
541 struct rpcrdma_ep *ep = &r_xprt->rx_ep;
542
543 req->rl_send_iov[2].addr = ep->rep_pad.addr;
544 req->rl_send_iov[2].length = padlen;
545 req->rl_send_iov[2].lkey = ep->rep_pad.lkey;
546
547 req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
548 req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
549 req->rl_send_iov[3].lkey = req->rl_iov.lkey;
550
551 req->rl_niovs = 4;
552 }
553
554 return 0;
555}

/*
 * Chase down a received write or reply chunk list to get the length
 * RDMA'd by the server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
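
/*
 * For reference, a sketch (not normative; see the RPC-over-RDMA
 * protocol specification) of the XDR stream rpcrdma_count_chunks()
 * walks, in 32-bit words:
 *
 *	+----------------+
 *	| segment count  |   N
 *	+----------------+
 *	| rs_handle      |  \
 *	| rs_length      |   }  repeated N times
 *	| rs_offset (x2) |  /   (64-bit offset)
 *	+----------------+
 *	| xdr_zero       |   list terminator (write chunk case only)
 *	+----------------+
 */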

/*
 * Scatter inline received data back into the provided iovecs.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
				    rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
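
/*
 * A worked example of the fixup above (assuming 4KB pages; the sizes
 * are chosen only for illustration): a 5000-byte inline reply received
 * into an xdr_buf with a 200-byte head, a one-page (4096-byte) page
 * list, and a tail would be split as
 *
 *	head[0]:  200 bytes (iov_base simply shifted to srcp)
 *	pages:   4096 bytes (copied via kmap_atomic)
 *	tail[0]:  704 bytes (memmove'd if not already in place)
 *
 * i.e. 200 + 4096 + 704 == 5000, with page_len and the tail iov_len
 * trimmed to the bytes actually received.
 */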

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down; the RPC
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
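
/*
 * Sketch of the cookie rule above: connect_cookie is bumped on every
 * connection state change, and zero appears to be kept as a reserved
 * "never connected" value. On wraparound the sequence is therefore
 * ..., 0xfffffffe, 0xffffffff, 1, 2, ... -- zero is skipped by the
 * extra increment.
 */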

/*
 * This function is called when a memory window unbind we are waiting
 * for completes. Just use rr_func (zeroed by upcall) to signal completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to time out, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		"           RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						       req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
					  RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			ntohl(headerp->rm_body.rm_chunks[0]),
			ntohl(headerp->rm_body.rm_chunks[1]),
			ntohl(headerp->rm_body.rm_chunks[2]),
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using mw bind, start the deregister process now. */
	/* (Note: if mr_free(), cannot perform it here, in tasklet context) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}
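
/*
 * Summary of the reply dispatch above (a sketch mirroring the switch
 * in rpcrdma_reply_handler()):
 *
 *	RDMA_MSG, no chunks:	 inline reply; rpcrdma_inline_fixup()
 *				 scatters it into the receive buffer
 *	RDMA_MSG, write list:	 count the RDMA'd bytes, then fix up
 *				 the inline remainder
 *	RDMA_NOMSG, reply chunk: the reply already landed in place;
 *				 no fixup is needed
 *	anything else:		 badheader; complete the RPC with -EIO
 */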