/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;
	rqstp->rq_arg.pages = &rqstp->rq_pages[1];
	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min(bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];

	/* We should never run out of SGE because the limit is defined to
	 * support the max allowed RPC data length
	 */
	BUG_ON(bc && (sge_no == ctxt->count));
	BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
	       != byte_count);
	BUG_ON(rqstp->rq_arg.len != byte_count);

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}
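
/* Worked example (illustrative only; the page size and SGE lengths
 * below are assumptions, not values taken from this code): suppose
 * PAGE_SIZE = 4096 and byte_count = 6000 arrives in two 4096-byte SGEs.
 * Then rdma_build_arg_xdr() computes
 *
 *	head[0].iov_len = min(6000, 4096) = 4096
 *	bc              = 6000 - 4096     = 1904	(pagelist bytes)
 *
 * so rq_arg.page_len becomes 1904, ctxt->pages[1] is swapped into
 * rq_pages[1], and rq_respages ends up pointing at rq_pages[2].
 */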

/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 *
 */
static int map_read_chunks(struct svcxprt_rdma *xprt,
			   struct svc_rqst *rqstp,
			   struct svc_rdma_op_ctxt *head,
			   struct rpcrdma_msg *rmsgp,
			   struct svc_rdma_req_map *rpl_map,
			   struct svc_rdma_req_map *chl_map,
			   int ch_count,
			   int byte_count)
{
	int sge_no;
	int sge_bytes;
	int page_off;
	int page_no;
	int ch_bytes;
	int ch_no;
	struct rpcrdma_read_chunk *ch;

	sge_no = 0;
	page_no = 0;
	page_off = 0;
	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	ch_no = 0;
	ch_bytes = ntohl(ch->rc_target.rs_length);
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = &head->pages[head->count];
	head->hdr_count = head->count; /* save count of hdr pages */
	head->arg.page_base = 0;
	head->arg.page_len = ch_bytes;
	head->arg.len = rqstp->rq_arg.len + ch_bytes;
	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
	head->count++;
	chl_map->ch[0].start = 0;
	while (byte_count) {
		rpl_map->sge[sge_no].iov_base =
			page_address(rqstp->rq_arg.pages[page_no]) + page_off;
		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
		rpl_map->sge[sge_no].iov_len = sge_bytes;
		/*
		 * Don't bump head->count here because the same page
		 * may be used by multiple SGE.
		 */
		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
		rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];

		byte_count -= sge_bytes;
		ch_bytes -= sge_bytes;
		sge_no++;
		/*
		 * If all bytes for this chunk have been mapped to an
		 * SGE, move to the next SGE
		 */
		if (ch_bytes == 0) {
			chl_map->ch[ch_no].count =
				sge_no - chl_map->ch[ch_no].start;
			ch_no++;
			ch++;
			chl_map->ch[ch_no].start = sge_no;
			ch_bytes = ntohl(ch->rc_target.rs_length);
			/* If bytes remain, account for the next chunk */
			if (byte_count) {
				head->arg.page_len += ch_bytes;
				head->arg.len += ch_bytes;
				head->arg.buflen += ch_bytes;
			}
		}
		/*
		 * If this SGE consumed all of the page, move to the
		 * next page
		 */
		if ((sge_bytes + page_off) == PAGE_SIZE) {
			page_no++;
			page_off = 0;
			/*
			 * If there are still bytes left to map, bump
			 * the page count
			 */
			if (byte_count)
				head->count++;
		} else
			page_off += sge_bytes;
	}
	BUG_ON(byte_count != 0);
	return sge_no;
}

/* Map a read-chunk-list to an XDR and fast register the page-list.
 *
 * Assumptions:
 * - chunk[0]	position points to pages[0] at an offset of 0
 * - pages[]	will be made physically contiguous by creating a one-off memory
 *		region using the fastreg verb.
 * - byte_count is # of bytes in read-chunk-list
 * - ch_count	is # of chunks in read-chunk-list
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
				struct svc_rqst *rqstp,
				struct svc_rdma_op_ctxt *head,
				struct rpcrdma_msg *rmsgp,
				struct svc_rdma_req_map *rpl_map,
				struct svc_rdma_req_map *chl_map,
				int ch_count,
				int byte_count)
{
	int page_no;
	int ch_no;
	u32 offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_fastreg_mr *frmr;
	int ret = 0;

	frmr = svc_rdma_get_frmr(xprt);
	if (IS_ERR(frmr))
		return -ENOMEM;

	head->frmr = frmr;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = &head->pages[head->count];
	head->hdr_count = head->count; /* save count of hdr pages */
	head->arg.page_base = 0;
	head->arg.page_len = byte_count;
	head->arg.len = rqstp->rq_arg.len + byte_count;
	head->arg.buflen = rqstp->rq_arg.buflen + byte_count;

	/* Fast register the page list */
	frmr->kva = page_address(rqstp->rq_arg.pages[0]);
	frmr->direction = DMA_FROM_DEVICE;
	frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
	frmr->map_len = byte_count;
	frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT;
	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		frmr->page_list->page_list[page_no] =
			ib_dma_map_page(xprt->sc_cm_id->device,
					rqstp->rq_arg.pages[page_no], 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;
		atomic_inc(&xprt->sc_dma_used);
		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
	}
	head->count += page_no;

	/* rq_respages points one past arg pages */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];

	/* Create the reply and chunk maps */
	offset = 0;
	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	for (ch_no = 0; ch_no < ch_count; ch_no++) {
		int len = ntohl(ch->rc_target.rs_length);
		rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
		rpl_map->sge[ch_no].iov_len = len;
		chl_map->ch[ch_no].count = 1;
		chl_map->ch[ch_no].start = ch_no;
		offset += len;
		ch++;
	}

	ret = svc_rdma_fastreg(xprt, frmr);
	if (ret)
		goto fatal_err;

	return ch_no;

 fatal_err:
	printk(KERN_ERR "svcrdma: error fast registering xdr for xprt %p\n",
	       xprt);
	svc_rdma_put_frmr(xprt, frmr);
	return -EIO;
}

static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
			     struct svc_rdma_op_ctxt *ctxt,
			     struct svc_rdma_fastreg_mr *frmr,
			     struct kvec *vec,
			     u64 *sgl_offset,
			     int count)
{
	int i;
	unsigned long off;

	ctxt->count = count;
	ctxt->direction = DMA_FROM_DEVICE;
	for (i = 0; i < count; i++) {
		ctxt->sge[i].length = 0; /* in case map fails */
		if (!frmr) {
			BUG_ON(!virt_to_page(vec[i].iov_base));
			off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
			ctxt->sge[i].addr =
				ib_dma_map_page(xprt->sc_cm_id->device,
						virt_to_page(vec[i].iov_base),
						off,
						vec[i].iov_len,
						DMA_FROM_DEVICE);
			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
						 ctxt->sge[i].addr))
				return -EINVAL;
			ctxt->sge[i].lkey = xprt->sc_dma_lkey;
			atomic_inc(&xprt->sc_dma_used);
		} else {
			ctxt->sge[i].addr = (unsigned long)vec[i].iov_base;
			ctxt->sge[i].lkey = frmr->mr->lkey;
		}
		ctxt->sge[i].length = vec[i].iov_len;
		*sgl_offset = *sgl_offset + vec[i].iov_len;
	}
	return 0;
}

static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
	if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
	     RDMA_TRANSPORT_IWARP) &&
	    sge_count > 1)
		return 1;
	else
		return min_t(int, sge_count, xprt->sc_max_sge);
}
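
/* Explanatory note: iWARP devices historically accept only a single
 * SGE as the local data sink of an RDMA Read, hence the clamp to one
 * SGE above; other transports may use up to sc_max_sge. The limit is
 * a property of the transport, not of this function.
 */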

/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base. Each chunk in the
 * array contains the following fields:
 * discrim      - '1'; this is not used for data placement
 * position     - the XDR stream offset (the same for every chunk)
 * handle       - RMR for the client memory region
 * length       - data transfer length
 * offset       - 64-bit tagged offset in the remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Do not touch it!
 */
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
			 struct rpcrdma_msg *rmsgp,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *hdr_ctxt)
{
	struct ib_send_wr read_wr;
	struct ib_send_wr inv_wr;
	int err = 0;
	int ch_no;
	int ch_count;
	int byte_count;
	int sge_count;
	u64 sgl_offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct svc_rdma_req_map *rpl_map;
	struct svc_rdma_req_map *chl_map;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
	if (ch_count > RPCSVC_MAXPAGES)
		return -EINVAL;

	/* Allocate temporary reply and chunk maps */
	rpl_map = svc_rdma_get_req_map();
	chl_map = svc_rdma_get_req_map();

	if (!xprt->sc_frmr_pg_list_len)
		sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
					    rpl_map, chl_map, ch_count,
					    byte_count);
	else
		sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
						 rpl_map, chl_map, ch_count,
						 byte_count);
	if (sge_count < 0) {
		err = -EIO;
		goto out;
	}

	sgl_offset = 0;
	ch_no = 0;

	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	     ch->rc_discrim != 0; ch++, ch_no++) {
		u64 rs_offset;
next_sge:
		ctxt = svc_rdma_get_context(xprt);
		ctxt->direction = DMA_FROM_DEVICE;
		ctxt->frmr = hdr_ctxt->frmr;
		ctxt->read_hdr = NULL;
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

		/* Prepare READ WR */
		memset(&read_wr, 0, sizeof read_wr);
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.opcode = IB_WR_RDMA_READ;
		ctxt->wr_op = read_wr.opcode;
		read_wr.send_flags = IB_SEND_SIGNALED;
		read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
				 &rs_offset);
		read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
		read_wr.sg_list = ctxt->sge;
		read_wr.num_sge =
			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
		err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr,
					&rpl_map->sge[chl_map->ch[ch_no].start],
					&sgl_offset,
					read_wr.num_sge);
		if (err) {
			svc_rdma_unmap_dma(ctxt);
			svc_rdma_put_context(ctxt, 0);
			goto out;
		}
		if (((ch+1)->rc_discrim == 0) &&
		    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
			/*
			 * Mark the last RDMA_READ with a bit to
			 * indicate all RPC data has been fetched from
			 * the client and the RPC needs to be enqueued.
			 */
			set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
			if (hdr_ctxt->frmr) {
				set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
				/*
				 * Invalidate the local MR used to map the data
				 * sink.
				 */
				if (xprt->sc_dev_caps &
				    SVCRDMA_DEVCAP_READ_W_INV) {
					read_wr.opcode =
						IB_WR_RDMA_READ_WITH_INV;
					ctxt->wr_op = read_wr.opcode;
					read_wr.ex.invalidate_rkey =
						ctxt->frmr->mr->lkey;
				} else {
					/* Prepare INVALIDATE WR */
					memset(&inv_wr, 0, sizeof inv_wr);
					inv_wr.opcode = IB_WR_LOCAL_INV;
					inv_wr.send_flags = IB_SEND_SIGNALED;
					inv_wr.ex.invalidate_rkey =
						hdr_ctxt->frmr->mr->lkey;
					read_wr.next = &inv_wr;
				}
			}
			ctxt->read_hdr = hdr_ctxt;
		}
		/* Post the read */
		err = svc_rdma_send(xprt, &read_wr);
		if (err) {
			printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
			       err);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_unmap_dma(ctxt);
			svc_rdma_put_context(ctxt, 0);
			goto out;
		}
		atomic_inc(&rdma_stat_read);

		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
			chl_map->ch[ch_no].count -= read_wr.num_sge;
			chl_map->ch[ch_no].start += read_wr.num_sge;
			goto next_sge;
		}
		sgl_offset = 0;
		err = 1;
	}

 out:
	svc_rdma_put_req_map(rpl_map);
	svc_rdma_put_req_map(chl_map);

	/* Detach arg pages. svc_recv will replenish them */
	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
		rqstp->rq_pages[ch_no] = NULL;

	/*
	 * Detach res pages. svc_release must see a resused count of
	 * zero or it will attempt to put them.
	 */
	while (rqstp->rq_resused)
		rqstp->rq_respages[--rqstp->rq_resused] = NULL;

	return err;
}

static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *head)
{
	int page_no;
	int ret;

	BUG_ON(!head);

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}
	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_resused = 0;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* Free the context */
	svc_rdma_put_context(head, 0);

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
		ret, rqstp->rq_arg.len,	rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	}
	if (ctxt) {
		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
		return rdma_read_complete(rqstp, ctxt);
	}

	if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will call into
		 * svc_recv again, and we should not be on the active
		 * transport list.
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		BUG_ON(ret);
		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		goto defer;
	}
	if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret = %d, rq_arg.len =%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
defer:
	return 0;
}
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is still not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled after the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
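
/* A condensed timeline of the two-call flow described above. The
 * function names are from this file; the sequence itself is an
 * illustrative sketch, not additional code:
 *
 *	svc_rdma_recvfrom()		first call, Read chunks present
 *	  -> posts RDMA Reads, moves pages into the recv_ctxt, returns 0
 *	  ... Read completions raise "Data Ready" again ...
 *	svc_rdma_recvfrom()		second call, possibly a new svc_rqst
 *	  -> rdma_read_complete(), returns the RPC Call message length
 */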

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}
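
/* Typical caller pattern, for reference (a sketch that mirrors
 * svc_rdma_post_recv() below; it introduces no new API):
 *
 *	ctxt = svc_rdma_recv_ctxt_get(rdma);
 *	if (!ctxt)
 *		return -ENOMEM;
 *	... use ctxt->rc_recv_wr and ctxt->rc_recv_buf ...
 *	svc_rdma_recv_ctxt_put(rdma, ctxt);
 *
 * svc_rdma_recv_ctxt_put() releases any pages still attached to the
 * ctxt and returns it to the sc_recv_ctxts free list, or destroys it
 * if rc_temp is set.
 */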

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret)
			return false;
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}
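
/* For reference, each Read segment consumed above has this on-the-wire
 * layout (widths in 32-bit XDR words, per RFC 8166):
 *
 *	+-----------+----------+--------+--------+------------------+
 *	| discrim=1 | position | handle | length | offset (2 words) |
 *	+-----------+----------+--------+--------+------------------+
 *
 * The list is terminated by a single xdr_zero word in the
 * discriminator position.
 */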

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}
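
/* For reference, a Write chunk as consumed above is a counted array
 * of segments (widths in 32-bit XDR words, per RFC 8166):
 *
 *	+----------+--------+--------+------------------+
 *	| segcount | handle | length | offset (2 words) | ... repeated
 *	+----------+--------+--------+------------------+
 */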

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 *
 * Perform this operation while the received transport header is
 * still in the CPU cache.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	__be32 inv_rkey, *p;
	u32 i, segcount;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = xdr_zero;
	p = ctxt->rc_recv_buf;
	p += rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero) {
		p++;	/* position */
		if (inv_rkey == xdr_zero)
			inv_rkey = *p;
		else if (inv_rkey != *p)
			return;
		p += 4;
	}

	/* Write list */
	while (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	/* Reply chunk */
	if (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}
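
/* Example with illustrative values: if the Read list and the Reply
 * chunk of a Call both carry R_key 0x1234 and no other R_key appears
 * anywhere in the transport header, rc_inv_rkey becomes 0x1234 and
 * the eventual Send may use Send With Invalidate to retire it. If a
 * second distinct R_key shows up, rc_inv_rkey stays zero and a plain
 * Send is used instead.
 */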

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there are enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
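
/* For reference, the fixed header words tested above (the layout is
 * from the checks in this function; the values are examples only):
 *
 *	word 0: XID		word 4: Read list   (xdr_zero)
 *	word 1: version		word 5: Write list  (xdr_zero)
 *	word 2: credits		word 6: Reply chunk (xdr_zero)
 *	word 3: rdma_msg	word 7: RPC XID (must match word 0)
 *				word 8: direction (must not be RPC_CALL)
 */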

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}
	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}