v5.4
  1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2/*
  3 * Copyright (c) 2016-2018 Oracle. All rights reserved.
  4 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
  5 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
  6 *
  7 * This software is available to you under a choice of one of two
  8 * licenses.  You may choose to be licensed under the terms of the GNU
  9 * General Public License (GPL) Version 2, available from the file
 10 * COPYING in the main directory of this source tree, or the BSD-type
 11 * license below:
 12 *
 13 * Redistribution and use in source and binary forms, with or without
 14 * modification, are permitted provided that the following conditions
 15 * are met:
 16 *
 17 *      Redistributions of source code must retain the above copyright
 18 *      notice, this list of conditions and the following disclaimer.
 19 *
 20 *      Redistributions in binary form must reproduce the above
 21 *      copyright notice, this list of conditions and the following
 22 *      disclaimer in the documentation and/or other materials provided
 23 *      with the distribution.
 24 *
 25 *      Neither the name of the Network Appliance, Inc. nor the names of
 26 *      its contributors may be used to endorse or promote products
 27 *      derived from this software without specific prior written
 28 *      permission.
 29 *
 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 34 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 35 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 36 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 37 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 38 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 41 *
 42 * Author: Tom Tucker <tom@opengridcomputing.com>
 43 */
 44
 45/* Operation
 46 *
 47 * The main entry point is svc_rdma_sendto. This is called by the
 48 * RPC server when an RPC Reply is ready to be transmitted to a client.
 49 *
 50 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 51 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 52 * transport header, post all Write WRs needed for this Reply, then post
 53 * a Send WR conveying the transport header and the RPC message itself to
 54 * the client.
 55 *
 56 * svc_rdma_sendto must fully transmit the Reply before returning, as
 57 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 58 * resources referred to by the svc_rqst are also recycled at that time.
 59 * Therefore any resources that must remain longer must be detached
 60 * from the svc_rqst and released later.
 61 *
 62 * Page Management
 63 *
 64 * The I/O that performs Reply transmission is asynchronous, and may
 65 * complete well after sendto returns. Thus pages under I/O must be
 66 * removed from the svc_rqst before sendto returns.
 67 *
 68 * The logic here depends on Send Queue and completion ordering. Since
 69 * the Send WR is always posted last, it will always complete last. Thus
 70 * when it completes, it is guaranteed that all previous Write WRs have
 71 * also completed.
 72 *
 73 * Write WRs are constructed and posted. Each Write segment gets its own
 74 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 75 * DMA-unmap the pages under I/O for that Write segment. The Write
 76 * completion handler does not release any pages.
 77 *
 78 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 79 * The ownership of all of the Reply's pages is transferred into that
 80 * ctxt, the Send WR is posted, and sendto returns.
 81 *
 82 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 83 * Send completion handler finally releases the Reply's pages.
 84 *
 85 * This mechanism also assumes that completions on the transport's Send
 86 * Completion Queue do not run in parallel. Otherwise a Write completion
 87 * and Send completion running at the same time could release pages that
 88 * are still DMA-mapped.
 89 *
 90 * Error Handling
 91 *
 92 * - If the Send WR is posted successfully, it will either complete
 93 *   successfully, or get flushed. Either way, the Send completion
 94 *   handler releases the Reply's pages.
 95 * - If the Send WR cannot be posted, the forward path releases
 96 *   the Reply's pages.
 97 *
 98 * This handles the case, without the use of page reference counting,
 99 * where two different Write segments send portions of the same page.
100 */
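The ownership handoff described above can be reduced to a small illustrative sketch (plain userspace C, not kernel code; all names are hypothetical): page pointers are moved out of the short-lived request into the send context before posting, and only the asynchronous completion callback frees them.

#include <stdlib.h>

struct example_req      { void *pages[8]; int npages; };
struct example_send_ctx { void *pages[8]; int npages; };

/* Detach the reply pages from the request; the send context now owns them. */
static void example_save_io_pages(struct example_req *req,
				  struct example_send_ctx *ctx)
{
	int i;

	for (i = 0; i < req->npages; i++) {
		ctx->pages[ctx->npages++] = req->pages[i];
		req->pages[i] = NULL;	/* the request must not free this page */
	}
	req->npages = 0;
}

/* Runs later, when the (simulated) Send completes; only now are pages freed. */
static void example_send_done(struct example_send_ctx *ctx)
{
	while (ctx->npages > 0)
		free(ctx->pages[--ctx->npages]);
}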
101
102#include <linux/spinlock.h>
103#include <asm/unaligned.h>
104
105#include <rdma/ib_verbs.h>
106#include <rdma/rdma_cm.h>
107
108#include <linux/sunrpc/debug.h>
109#include <linux/sunrpc/rpc_rdma.h>
110#include <linux/sunrpc/svc_rdma.h>
111
112#include "xprt_rdma.h"
113#include <trace/events/rpcrdma.h>
114
115#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
116
117static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
118
119static inline struct svc_rdma_send_ctxt *
120svc_rdma_next_send_ctxt(struct list_head *list)
121{
122	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
123					sc_list);
124}
125
126static struct svc_rdma_send_ctxt *
127svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
128{
129	struct svc_rdma_send_ctxt *ctxt;
130	dma_addr_t addr;
131	void *buffer;
132	size_t size;
133	int i;
134
135	size = sizeof(*ctxt);
136	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
137	ctxt = kmalloc(size, GFP_KERNEL);
138	if (!ctxt)
139		goto fail0;
140	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
141	if (!buffer)
142		goto fail1;
143	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
144				 rdma->sc_max_req_size, DMA_TO_DEVICE);
145	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
146		goto fail2;
147
148	ctxt->sc_send_wr.next = NULL;
149	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
150	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
151	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
152	ctxt->sc_cqe.done = svc_rdma_wc_send;
153	ctxt->sc_xprt_buf = buffer;
154	ctxt->sc_sges[0].addr = addr;
155
156	for (i = 0; i < rdma->sc_max_send_sges; i++)
157		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
158	return ctxt;
159
160fail2:
161	kfree(buffer);
162fail1:
163	kfree(ctxt);
164fail0:
165	return NULL;
166}
167
168/**
169 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
170 * @rdma: svcxprt_rdma being torn down
171 *
172 */
173void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
174{
175	struct svc_rdma_send_ctxt *ctxt;
176
177	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
178		list_del(&ctxt->sc_list);
179		ib_dma_unmap_single(rdma->sc_pd->device,
180				    ctxt->sc_sges[0].addr,
181				    rdma->sc_max_req_size,
182				    DMA_TO_DEVICE);
183		kfree(ctxt->sc_xprt_buf);
184		kfree(ctxt);
185	}
186}
187
188/**
189 * svc_rdma_send_ctxt_get - Get a free send_ctxt
190 * @rdma: controlling svcxprt_rdma
191 *
192 * Returns a ready-to-use send_ctxt, or NULL if none are
193 * available and a fresh one cannot be allocated.
194 */
195struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
196{
197	struct svc_rdma_send_ctxt *ctxt;
198
199	spin_lock(&rdma->sc_send_lock);
200	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
201	if (!ctxt)
202		goto out_empty;
203	list_del(&ctxt->sc_list);
204	spin_unlock(&rdma->sc_send_lock);
205
206out:
207	ctxt->sc_send_wr.num_sge = 0;
208	ctxt->sc_cur_sge_no = 0;
209	ctxt->sc_page_count = 0;
210	return ctxt;
211
212out_empty:
213	spin_unlock(&rdma->sc_send_lock);
214	ctxt = svc_rdma_send_ctxt_alloc(rdma);
215	if (!ctxt)
216		return NULL;
217	goto out;
218}
219
220/**
221 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
222 * @rdma: controlling svcxprt_rdma
223 * @ctxt: object to return to the free list
224 *
225 * Pages left in sc_pages are DMA unmapped and released.
226 */
227void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
228			    struct svc_rdma_send_ctxt *ctxt)
229{
230	struct ib_device *device = rdma->sc_cm_id->device;
231	unsigned int i;
232
233	/* The first SGE contains the transport header, which
234	 * remains mapped until @ctxt is destroyed.
235	 */
236	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
237		ib_dma_unmap_page(device,
238				  ctxt->sc_sges[i].addr,
239				  ctxt->sc_sges[i].length,
240				  DMA_TO_DEVICE);
241
242	for (i = 0; i < ctxt->sc_page_count; ++i)
243		put_page(ctxt->sc_pages[i]);
244
245	spin_lock(&rdma->sc_send_lock);
246	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
247	spin_unlock(&rdma->sc_send_lock);
248}
249
250/**
251 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
252 * @cq: Completion Queue context
253 * @wc: Work Completion object
254 *
255 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
256 * the Send completion handler could be running.
257 */
258static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
259{
260	struct svcxprt_rdma *rdma = cq->cq_context;
261	struct ib_cqe *cqe = wc->wr_cqe;
262	struct svc_rdma_send_ctxt *ctxt;
263
264	trace_svcrdma_wc_send(wc);
265
266	atomic_inc(&rdma->sc_sq_avail);
267	wake_up(&rdma->sc_send_wait);
268
269	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
270	svc_rdma_send_ctxt_put(rdma, ctxt);
271
272	if (unlikely(wc->status != IB_WC_SUCCESS)) {
273		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
274		svc_xprt_enqueue(&rdma->sc_xprt);
275	}
276
277	svc_xprt_put(&rdma->sc_xprt);
278}
279
280/**
281 * svc_rdma_send - Post a single Send WR
282 * @rdma: transport on which to post the WR
283 * @wr: prepared Send WR to post
284 *
 285 * Returns zero if the Send WR was posted successfully. Otherwise, a
286 * negative errno is returned.
287 */
288int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
289{
290	int ret;
291
292	might_sleep();
293
294	/* If the SQ is full, wait until an SQ entry is available */
295	while (1) {
296		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
297			atomic_inc(&rdma_stat_sq_starve);
298			trace_svcrdma_sq_full(rdma);
299			atomic_inc(&rdma->sc_sq_avail);
300			wait_event(rdma->sc_send_wait,
301				   atomic_read(&rdma->sc_sq_avail) > 1);
302			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
303				return -ENOTCONN;
304			trace_svcrdma_sq_retry(rdma);
305			continue;
306		}
307
308		svc_xprt_get(&rdma->sc_xprt);
309		ret = ib_post_send(rdma->sc_qp, wr, NULL);
310		trace_svcrdma_post_send(wr, ret);
311		if (ret) {
312			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
313			svc_xprt_put(&rdma->sc_xprt);
314			wake_up(&rdma->sc_send_wait);
315		}
316		break;
317	}
318	return ret;
319}
320
321static u32 xdr_padsize(u32 len)
322{
323	return (len & 3) ? (4 - (len & 3)) : 0;
324}
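A quick worked check of the rule above (standard XDR 4-byte alignment):

/* xdr_padsize(0) == 0,  xdr_padsize(1) == 3,
 * xdr_padsize(4) == 0,  xdr_padsize(5) == 3,  xdr_padsize(6) == 2.
 * A 5-byte page list is therefore followed by 3 pad bytes before the
 * next XDR item (here, the tail) begins.
 */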
325
326/* Returns length of transport header, in bytes.
327 */
328static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
329{
330	unsigned int nsegs;
331	__be32 *p;
332
333	p = rdma_resp;
334
335	/* RPC-over-RDMA V1 replies never have a Read list. */
336	p += rpcrdma_fixed_maxsz + 1;
337
338	/* Skip Write list. */
339	while (*p++ != xdr_zero) {
340		nsegs = be32_to_cpup(p++);
341		p += nsegs * rpcrdma_segment_maxsz;
342	}
343
344	/* Skip Reply chunk. */
345	if (*p++ != xdr_zero) {
346		nsegs = be32_to_cpup(p++);
347		p += nsegs * rpcrdma_segment_maxsz;
348	}
349
350	return (unsigned long)p - (unsigned long)rdma_resp;
351}
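As an illustration of the header this helper walks, the sketch below is purely illustrative (it would have to live alongside this file's static helper to compile, and it assumes the usual RPC-over-RDMA v1 constants from rpc_rdma.h such as RPCRDMA_HDRLEN_MIN and rpcrdma_version; the xid and credit values are invented). It builds the smallest possible Reply header, with all three chunk lists empty, and measures it:

/* Illustrative only: four fixed words, then the Read list, Write list,
 * and Reply chunk discriminators, all left empty.
 */
static unsigned int example_minimal_reply_hdr_len(void)
{
	__be32 hdr[RPCRDMA_HDRLEN_MIN / sizeof(__be32)];
	__be32 *p = hdr;

	*p++ = cpu_to_be32(0x12345678);	/* rdma_xid: copied from the Call */
	*p++ = rpcrdma_version;		/* rdma_vers */
	*p++ = cpu_to_be32(32);		/* rdma_credit: credit grant */
	*p++ = rdma_msg;		/* rdma_proc: RPC message sent inline */
	*p++ = xdr_zero;		/* empty Read list */
	*p++ = xdr_zero;		/* empty Write list */
	*p   = xdr_zero;		/* no Reply chunk */

	return svc_rdma_reply_hdr_len(hdr);	/* 7 * sizeof(__be32) == 28 */
}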
352
353/* One Write chunk is copied from Call transport header to Reply
354 * transport header. Each segment's length field is updated to
355 * reflect number of bytes consumed in the segment.
356 *
357 * Returns number of segments in this chunk.
358 */
359static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
360					   unsigned int remaining)
361{
362	unsigned int i, nsegs;
363	u32 seg_len;
364
365	/* Write list discriminator */
366	*dst++ = *src++;
367
368	/* number of segments in this chunk */
369	nsegs = be32_to_cpup(src);
370	*dst++ = *src++;
371
372	for (i = nsegs; i; i--) {
373		/* segment's RDMA handle */
374		*dst++ = *src++;
375
376		/* bytes returned in this segment */
377		seg_len = be32_to_cpu(*src);
378		if (remaining >= seg_len) {
379			/* entire segment was consumed */
380			*dst = *src;
381			remaining -= seg_len;
382		} else {
383			/* segment only partly filled */
384			*dst = cpu_to_be32(remaining);
385			remaining = 0;
386		}
387		dst++; src++;
388
389		/* segment's RDMA offset */
390		*dst++ = *src++;
391		*dst++ = *src++;
392	}
393
394	return nsegs;
395}
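A worked example of the length rewriting above (segment sizes are hypothetical): for a Write chunk with two 4096-byte segments, writing 5000 bytes leaves the Reply's copy reporting segment lengths of 4096 and 904, while writing only 3000 bytes leaves it reporting 3000 and 0; handles and offsets are copied through unchanged in both cases.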
396
397/* The client provided a Write list in the Call message. Fill in
398 * the segments in the first Write chunk in the Reply's transport
399 * header with the number of bytes consumed in each segment.
400 * Remaining chunks are returned unused.
401 *
402 * Assumptions:
403 *  - Client has provided only one Write chunk
404 */
405static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
406					   unsigned int consumed)
407{
408	unsigned int nsegs;
409	__be32 *p, *q;
410
411	/* RPC-over-RDMA V1 replies never have a Read list. */
412	p = rdma_resp + rpcrdma_fixed_maxsz + 1;
413
414	q = wr_ch;
415	while (*q != xdr_zero) {
416		nsegs = xdr_encode_write_chunk(p, q, consumed);
417		q += 2 + nsegs * rpcrdma_segment_maxsz;
418		p += 2 + nsegs * rpcrdma_segment_maxsz;
419		consumed = 0;
420	}
421
422	/* Terminate Write list */
423	*p++ = xdr_zero;
424
425	/* Reply chunk discriminator; may be replaced later */
426	*p = xdr_zero;
427}
428
429/* The client provided a Reply chunk in the Call message. Fill in
430 * the segments in the Reply chunk in the Reply message with the
431 * number of bytes consumed in each segment.
432 *
433 * Assumptions:
434 * - Reply can always fit in the provided Reply chunk
435 */
436static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
437					    unsigned int consumed)
438{
439	__be32 *p;
440
441	/* Find the Reply chunk in the Reply's xprt header.
442	 * RPC-over-RDMA V1 replies never have a Read list.
443	 */
444	p = rdma_resp + rpcrdma_fixed_maxsz + 1;
445
446	/* Skip past Write list */
447	while (*p++ != xdr_zero)
448		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
449
450	xdr_encode_write_chunk(p, rp_ch, consumed);
451}
452
453/* Parse the RPC Call's transport header.
454 */
455static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
456				      __be32 **write, __be32 **reply)
457{
458	__be32 *p;
459
460	p = rdma_argp + rpcrdma_fixed_maxsz;
461
462	/* Read list */
463	while (*p++ != xdr_zero)
464		p += 5;
465
466	/* Write list */
467	if (*p != xdr_zero) {
468		*write = p;
469		while (*p++ != xdr_zero)
470			p += 1 + be32_to_cpu(*p) * 4;
471	} else {
472		*write = NULL;
473		p++;
474	}
475
476	/* Reply chunk */
477	if (*p != xdr_zero)
478		*reply = p;
479	else
480		*reply = NULL;
481}
482
483static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
484				 struct svc_rdma_send_ctxt *ctxt,
485				 struct page *page,
486				 unsigned long offset,
487				 unsigned int len)
488{
489	struct ib_device *dev = rdma->sc_cm_id->device;
490	dma_addr_t dma_addr;
491
492	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
493	if (ib_dma_mapping_error(dev, dma_addr))
494		goto out_maperr;
495
496	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
497	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
498	ctxt->sc_send_wr.num_sge++;
499	return 0;
500
501out_maperr:
502	trace_svcrdma_dma_map_page(rdma, page);
503	return -EIO;
504}
505
506/* ib_dma_map_page() is used here because svc_rdma_dma_unmap()
507 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
508 */
509static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
510				struct svc_rdma_send_ctxt *ctxt,
511				unsigned char *base,
512				unsigned int len)
513{
514	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
515				     offset_in_page(base), len);
516}
517
518/**
519 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
520 * @rdma: controlling transport
521 * @ctxt: send_ctxt for the Send WR
522 * @len: length of transport header
523 *
524 */
525void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
526			     struct svc_rdma_send_ctxt *ctxt,
527			     unsigned int len)
528{
529	ctxt->sc_sges[0].length = len;
530	ctxt->sc_send_wr.num_sge++;
531	ib_dma_sync_single_for_device(rdma->sc_pd->device,
532				      ctxt->sc_sges[0].addr, len,
533				      DMA_TO_DEVICE);
534}
535
536/* If the xdr_buf has more elements than the device can
537 * transmit in a single RDMA Send, then the reply will
538 * have to be copied into a bounce buffer.
539 */
540static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
541				    struct xdr_buf *xdr,
542				    __be32 *wr_lst)
543{
544	int elements;
545
546	/* xdr->head */
547	elements = 1;
548
549	/* xdr->pages */
550	if (!wr_lst) {
551		unsigned int remaining;
552		unsigned long pageoff;
553
554		pageoff = xdr->page_base & ~PAGE_MASK;
555		remaining = xdr->page_len;
556		while (remaining) {
557			++elements;
558			remaining -= min_t(u32, PAGE_SIZE - pageoff,
559					   remaining);
560			pageoff = 0;
561		}
562	}
563
564	/* xdr->tail */
565	if (xdr->tail[0].iov_len)
566		++elements;
567
568	/* assume 1 SGE is needed for the transport header */
569	return elements >= rdma->sc_max_send_sges;
570}
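A worked example of this test (device limits are hypothetical): a reply with a head, a 10000-byte page list starting at page offset 0, and a non-empty tail counts 1 + 3 + 1 = 5 elements; with one more SGE reserved for the transport header, the Send needs 6 SGEs in total, so a device advertising sc_max_send_sges of 6 or more can send it directly, while one advertising 5 or fewer forces a pull-up into the bounce buffer.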
571
572/* The device is not capable of sending the reply directly.
573 * Assemble the elements of @xdr into the transport header
574 * buffer.
575 */
576static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
577				      struct svc_rdma_send_ctxt *ctxt,
578				      struct xdr_buf *xdr, __be32 *wr_lst)
579{
580	unsigned char *dst, *tailbase;
581	unsigned int taillen;
582
583	dst = ctxt->sc_xprt_buf;
584	dst += ctxt->sc_sges[0].length;
585
586	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
587	dst += xdr->head[0].iov_len;
588
589	tailbase = xdr->tail[0].iov_base;
590	taillen = xdr->tail[0].iov_len;
591	if (wr_lst) {
592		u32 xdrpad;
593
594		xdrpad = xdr_padsize(xdr->page_len);
595		if (taillen && xdrpad) {
596			tailbase += xdrpad;
597			taillen -= xdrpad;
598		}
599	} else {
600		unsigned int len, remaining;
601		unsigned long pageoff;
602		struct page **ppages;
603
604		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
605		pageoff = xdr->page_base & ~PAGE_MASK;
606		remaining = xdr->page_len;
607		while (remaining) {
608			len = min_t(u32, PAGE_SIZE - pageoff, remaining);
609
610			memcpy(dst, page_address(*ppages), len);
611			remaining -= len;
612			dst += len;
613			pageoff = 0;
614		}
615	}
616
617	if (taillen)
618		memcpy(dst, tailbase, taillen);
619
620	ctxt->sc_sges[0].length += xdr->len;
621	ib_dma_sync_single_for_device(rdma->sc_pd->device,
622				      ctxt->sc_sges[0].addr,
623				      ctxt->sc_sges[0].length,
624				      DMA_TO_DEVICE);
625
626	return 0;
627}
628
629/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
630 * @rdma: controlling transport
631 * @ctxt: send_ctxt for the Send WR
632 * @xdr: prepared xdr_buf containing RPC message
633 * @wr_lst: pointer to Call header's Write list, or NULL
634 *
635 * Load the xdr_buf into the ctxt's sge array, and DMA map each
636 * element as it is added.
637 *
638 * Returns zero on success, or a negative errno on failure.
639 */
640int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
641			   struct svc_rdma_send_ctxt *ctxt,
642			   struct xdr_buf *xdr, __be32 *wr_lst)
643{
644	unsigned int len, remaining;
645	unsigned long page_off;
646	struct page **ppages;
647	unsigned char *base;
648	u32 xdr_pad;
649	int ret;
650
651	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
652		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
653
654	++ctxt->sc_cur_sge_no;
655	ret = svc_rdma_dma_map_buf(rdma, ctxt,
656				   xdr->head[0].iov_base,
657				   xdr->head[0].iov_len);
658	if (ret < 0)
659		return ret;
660
661	/* If a Write chunk is present, the xdr_buf's page list
662	 * is not included inline. However the Upper Layer may
663	 * have added XDR padding in the tail buffer, and that
664	 * should not be included inline.
665	 */
666	if (wr_lst) {
667		base = xdr->tail[0].iov_base;
668		len = xdr->tail[0].iov_len;
669		xdr_pad = xdr_padsize(xdr->page_len);
670
671		if (len && xdr_pad) {
672			base += xdr_pad;
673			len -= xdr_pad;
674		}
675
676		goto tail;
677	}
678
679	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
680	page_off = xdr->page_base & ~PAGE_MASK;
681	remaining = xdr->page_len;
682	while (remaining) {
683		len = min_t(u32, PAGE_SIZE - page_off, remaining);
684
685		++ctxt->sc_cur_sge_no;
686		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
687					    page_off, len);
688		if (ret < 0)
689			return ret;
690
691		remaining -= len;
692		page_off = 0;
693	}
694
695	base = xdr->tail[0].iov_base;
696	len = xdr->tail[0].iov_len;
697tail:
698	if (len) {
699		++ctxt->sc_cur_sge_no;
700		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
701		if (ret < 0)
702			return ret;
703	}
704
705	return 0;
706}
707
708/* The svc_rqst and all resources it owns are released as soon as
709 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
710 * so they are released by the Send completion handler.
711 */
712static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
713				   struct svc_rdma_send_ctxt *ctxt)
714{
715	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;
716
717	ctxt->sc_page_count += pages;
718	for (i = 0; i < pages; i++) {
719		ctxt->sc_pages[i] = rqstp->rq_respages[i];
720		rqstp->rq_respages[i] = NULL;
721	}
722
723	/* Prevent svc_xprt_release from releasing pages in rq_pages */
724	rqstp->rq_next_page = rqstp->rq_respages;
725}
726
727/* Prepare the portion of the RPC Reply that will be transmitted
728 * via RDMA Send. The RPC-over-RDMA transport header is prepared
729 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
730 *
731 * Depending on whether a Write list or Reply chunk is present,
732 * the server may send all, a portion of, or none of the xdr_buf.
733 * In the latter case, only the transport header (sc_sges[0]) is
734 * transmitted.
735 *
736 * RDMA Send is the last step of transmitting an RPC reply. Pages
737 * involved in the earlier RDMA Writes are here transferred out
738 * of the rqstp and into the sctxt's page array. These pages are
739 * DMA unmapped by each Write completion, but the subsequent Send
740 * completion finally releases these pages.
741 *
742 * Assumptions:
743 * - The Reply's transport header will never be larger than a page.
744 */
745static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
746				   struct svc_rdma_send_ctxt *sctxt,
747				   struct svc_rdma_recv_ctxt *rctxt,
748				   struct svc_rqst *rqstp,
749				   __be32 *wr_lst, __be32 *rp_ch)
750{
751	int ret;
752
753	if (!rp_ch) {
754		ret = svc_rdma_map_reply_msg(rdma, sctxt,
755					     &rqstp->rq_res, wr_lst);
756		if (ret < 0)
757			return ret;
758	}
759
760	svc_rdma_save_io_pages(rqstp, sctxt);
761
762	if (rctxt->rc_inv_rkey) {
763		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
764		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
765	} else {
766		sctxt->sc_send_wr.opcode = IB_WR_SEND;
767	}
768	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
769		sctxt->sc_send_wr.num_sge);
770	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
771}
772
773/* Given the client-provided Write and Reply chunks, the server was not
774 * able to form a complete reply. Return an RDMA_ERROR message so the
775 * client can retire this RPC transaction. As above, the Send completion
776 * routine releases payload pages that were part of a previous RDMA Write.
777 *
778 * Remote Invalidation is skipped for simplicity.
779 */
780static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
781				   struct svc_rdma_send_ctxt *ctxt,
782				   struct svc_rqst *rqstp)
783{
784	__be32 *p;
785	int ret;
786
787	p = ctxt->sc_xprt_buf;
788	trace_svcrdma_err_chunk(*p);
789	p += 3;
790	*p++ = rdma_error;
791	*p   = err_chunk;
792	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);
793
794	svc_rdma_save_io_pages(rqstp, ctxt);
795
796	ctxt->sc_send_wr.opcode = IB_WR_SEND;
797	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
798	if (ret) {
799		svc_rdma_send_ctxt_put(rdma, ctxt);
800		return ret;
801	}
802
803	return 0;
804}
805
806/**
807 * svc_rdma_sendto - Transmit an RPC reply
808 * @rqstp: processed RPC request, reply XDR already in ::rq_res
809 *
810 * Any resources still associated with @rqstp are released upon return.
811 * If no reply message was possible, the connection is closed.
812 *
813 * Returns:
814 *	%0 if an RPC reply has been successfully posted,
815 *	%-ENOMEM if a resource shortage occurred (connection is lost),
816 *	%-ENOTCONN if posting failed (connection is lost).
817 */
818int svc_rdma_sendto(struct svc_rqst *rqstp)
819{
820	struct svc_xprt *xprt = rqstp->rq_xprt;
821	struct svcxprt_rdma *rdma =
822		container_of(xprt, struct svcxprt_rdma, sc_xprt);
823	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
824	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
825	struct xdr_buf *xdr = &rqstp->rq_res;
826	struct svc_rdma_send_ctxt *sctxt;
827	int ret;
828
829	rdma_argp = rctxt->rc_recv_buf;
830	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);
831
832	/* Create the RDMA response header. xprt->xpt_mutex,
833	 * acquired in svc_send(), serializes RPC replies. The
834	 * code path below that inserts the credit grant value
835	 * into each transport header runs only inside this
836	 * critical section.
837	 */
838	ret = -ENOMEM;
839	sctxt = svc_rdma_send_ctxt_get(rdma);
840	if (!sctxt)
841		goto err0;
842	rdma_resp = sctxt->sc_xprt_buf;
843
844	p = rdma_resp;
845	*p++ = *rdma_argp;
846	*p++ = *(rdma_argp + 1);
847	*p++ = rdma->sc_fc_credits;
848	*p++ = rp_ch ? rdma_nomsg : rdma_msg;
849
850	/* Start with empty chunks */
851	*p++ = xdr_zero;
852	*p++ = xdr_zero;
853	*p   = xdr_zero;
854
855	if (wr_lst) {
856		/* XXX: Presume the client sent only one Write chunk */
857		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
858		if (ret < 0)
859			goto err2;
860		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
861	}
862	if (rp_ch) {
863		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
864		if (ret < 0)
865			goto err2;
866		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
867	}
868
869	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
870	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
871				      wr_lst, rp_ch);
872	if (ret < 0)
873		goto err1;
874	ret = 0;
875
876out:
877	rqstp->rq_xprt_ctxt = NULL;
878	svc_rdma_recv_ctxt_put(rdma, rctxt);
879	return ret;
880
881 err2:
882	if (ret != -E2BIG && ret != -EINVAL)
883		goto err1;
884
885	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
886	if (ret < 0)
887		goto err1;
888	ret = 0;
889	goto out;
890
891 err1:
892	svc_rdma_send_ctxt_put(rdma, sctxt);
893 err0:
894	trace_svcrdma_send_failed(rqstp, ret);
895	set_bit(XPT_CLOSE, &xprt->xpt_flags);
896	ret = -ENOTCONN;
897	goto out;
898}
v3.1
 
  1/*
  2 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the BSD-type
  8 * license below:
  9 *
 10 * Redistribution and use in source and binary forms, with or without
 11 * modification, are permitted provided that the following conditions
 12 * are met:
 13 *
 14 *      Redistributions of source code must retain the above copyright
 15 *      notice, this list of conditions and the following disclaimer.
 16 *
 17 *      Redistributions in binary form must reproduce the above
 18 *      copyright notice, this list of conditions and the following
 19 *      disclaimer in the documentation and/or other materials provided
 20 *      with the distribution.
 21 *
 22 *      Neither the name of the Network Appliance, Inc. nor the names of
 23 *      its contributors may be used to endorse or promote products
 24 *      derived from this software without specific prior written
 25 *      permission.
 26 *
 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 38 *
 39 * Author: Tom Tucker <tom@opengridcomputing.com>
 40 */
 41
 42#include <linux/sunrpc/debug.h>
 43#include <linux/sunrpc/rpc_rdma.h>
 44#include <linux/spinlock.h>
 45#include <asm/unaligned.h>
 46#include <rdma/ib_verbs.h>
 47#include <rdma/rdma_cm.h>
 48#include <linux/sunrpc/svc_rdma.h>
 49
 50#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 51
 52/* Encode an XDR as an array of IB SGE
 53 *
 54 * Assumptions:
 55 * - head[0] is physically contiguous.
 56 * - tail[0] is physically contiguous.
 57 * - pages[] is not physically or virtually contiguous and consists of
 58 *   PAGE_SIZE elements.
 59 *
 60 * Output:
 61 * SGE[0]              reserved for RPCRDMA header
 62 * SGE[1]              data from xdr->head[]
 63 * SGE[2..sge_count-2] data from xdr->pages[]
 64 * SGE[sge_count-1]    data from xdr->tail.
 65 *
 66 * The max SGE we need is the length of the XDR / pagesize + one for
 67 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 68 * reserves a page for both the request and the reply header, and this
 69 *   array is only concerned with the reply, we are assured that we have
 70 * one extra page for the RPCRDMA header.
 71 */
 72static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 73			struct xdr_buf *xdr,
 74			struct svc_rdma_req_map *vec)
 75{
 76	int sge_no;
 77	u32 sge_bytes;
 78	u32 page_bytes;
 79	u32 page_off;
 80	int page_no = 0;
 81	u8 *frva;
 82	struct svc_rdma_fastreg_mr *frmr;
 83
 84	frmr = svc_rdma_get_frmr(xprt);
 85	if (IS_ERR(frmr))
 86		return -ENOMEM;
 87	vec->frmr = frmr;
 88
 89	/* Skip the RPCRDMA header */
 90	sge_no = 1;
 91
 92	/* Map the head. */
 93	frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
 94	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
 95	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
 96	vec->count = 2;
 97	sge_no++;
 98
 99	/* Map the XDR head */
100	frmr->kva = frva;
101	frmr->direction = DMA_TO_DEVICE;
102	frmr->access_flags = 0;
103	frmr->map_len = PAGE_SIZE;
104	frmr->page_list_len = 1;
105	page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
106	frmr->page_list->page_list[page_no] =
107		ib_dma_map_page(xprt->sc_cm_id->device,
108				virt_to_page(xdr->head[0].iov_base),
109				page_off,
110				PAGE_SIZE - page_off,
111				DMA_TO_DEVICE);
112	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
113				 frmr->page_list->page_list[page_no]))
114		goto fatal_err;
115	atomic_inc(&xprt->sc_dma_used);
116
117	/* Map the XDR page list */
118	page_off = xdr->page_base;
119	page_bytes = xdr->page_len + page_off;
120	if (!page_bytes)
121		goto encode_tail;
122
123	/* Map the pages */
124	vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
125	vec->sge[sge_no].iov_len = page_bytes;
126	sge_no++;
127	while (page_bytes) {
128		struct page *page;
129
130		page = xdr->pages[page_no++];
131		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
132		page_bytes -= sge_bytes;
133
134		frmr->page_list->page_list[page_no] =
135			ib_dma_map_page(xprt->sc_cm_id->device,
136					page, page_off,
137					sge_bytes, DMA_TO_DEVICE);
138		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
139					 frmr->page_list->page_list[page_no]))
140			goto fatal_err;
141
142		atomic_inc(&xprt->sc_dma_used);
143		page_off = 0; /* reset for next time through loop */
144		frmr->map_len += PAGE_SIZE;
145		frmr->page_list_len++;
146	}
147	vec->count++;
148
149 encode_tail:
150	/* Map tail */
151	if (0 == xdr->tail[0].iov_len)
152		goto done;
153
154	vec->count++;
155	vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
156
157	if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
158	    ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
159		/*
160		 * If head and tail use the same page, we don't need
161		 * to map it again.
162		 */
163		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
164	} else {
165		void *va;
166
167		/* Map another page for the tail */
168		page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
169		va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
170		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
171
172		frmr->page_list->page_list[page_no] =
173		    ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
174				    page_off,
175				    PAGE_SIZE,
176				    DMA_TO_DEVICE);
177		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
178					 frmr->page_list->page_list[page_no]))
179			goto fatal_err;
180		atomic_inc(&xprt->sc_dma_used);
181		frmr->map_len += PAGE_SIZE;
182		frmr->page_list_len++;
183	}
184
185 done:
186	if (svc_rdma_fastreg(xprt, frmr))
187		goto fatal_err;
188
189	return 0;
190
191 fatal_err:
192	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
193	vec->frmr = NULL;
194	svc_rdma_put_frmr(xprt, frmr);
195	return -EIO;
196}
197
198static int map_xdr(struct svcxprt_rdma *xprt,
199		   struct xdr_buf *xdr,
200		   struct svc_rdma_req_map *vec)
201{
202	int sge_no;
203	u32 sge_bytes;
204	u32 page_bytes;
205	u32 page_off;
206	int page_no;
207
208	BUG_ON(xdr->len !=
209	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));
210
211	if (xprt->sc_frmr_pg_list_len)
212		return fast_reg_xdr(xprt, xdr, vec);
213
214	/* Skip the first sge, this is for the RPCRDMA header */
215	sge_no = 1;
216
217	/* Head SGE */
218	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
219	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
220	sge_no++;
221
222	/* pages SGE */
223	page_no = 0;
224	page_bytes = xdr->page_len;
225	page_off = xdr->page_base;
226	while (page_bytes) {
227		vec->sge[sge_no].iov_base =
228			page_address(xdr->pages[page_no]) + page_off;
229		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
230		page_bytes -= sge_bytes;
231		vec->sge[sge_no].iov_len = sge_bytes;
232
233		sge_no++;
234		page_no++;
235		page_off = 0; /* reset for next time through loop */
236	}
237
238	/* Tail SGE */
239	if (xdr->tail[0].iov_len) {
240		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
241		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
242		sge_no++;
243	}
244
245	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
246		"page_base %u page_len %u head_len %zu tail_len %zu\n",
247		sge_no, page_no, xdr->page_base, xdr->page_len,
248		xdr->head[0].iov_len, xdr->tail[0].iov_len);
249
250	vec->count = sge_no;
251	return 0;
252}
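A worked example of the mapping above (sizes are hypothetical): an xdr_buf with a 200-byte head, 6000 bytes of page data at page_base 0, and a 4-byte tail fills vec->sge[1] (head), vec->sge[2] and vec->sge[3] (4096 and 1904 bytes of page data), and vec->sge[4] (tail), leaving sge[0] for the RPCRDMA header and setting vec->count to 5.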
253
254static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
255			      struct xdr_buf *xdr,
256			      u32 xdr_off, size_t len, int dir)
257{
258	struct page *page;
259	dma_addr_t dma_addr;
260	if (xdr_off < xdr->head[0].iov_len) {
261		/* This offset is in the head */
262		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
263		page = virt_to_page(xdr->head[0].iov_base);
264	} else {
265		xdr_off -= xdr->head[0].iov_len;
266		if (xdr_off < xdr->page_len) {
267			/* This offset is in the page list */
268			page = xdr->pages[xdr_off >> PAGE_SHIFT];
269			xdr_off &= ~PAGE_MASK;
270		} else {
271			/* This offset is in the tail */
272			xdr_off -= xdr->page_len;
273			xdr_off += (unsigned long)
274				xdr->tail[0].iov_base & ~PAGE_MASK;
275			page = virt_to_page(xdr->tail[0].iov_base);
276		}
277	}
278	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
279				   min_t(size_t, PAGE_SIZE, len), dir);
280	return dma_addr;
281}
282
283/* Assumptions:
284 * - We are using FRMR
285 *     - or -
286 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
287 */
288static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
289		      u32 rmr, u64 to,
290		      u32 xdr_off, int write_len,
291		      struct svc_rdma_req_map *vec)
292{
293	struct ib_send_wr write_wr;
294	struct ib_sge *sge;
295	int xdr_sge_no;
296	int sge_no;
297	int sge_bytes;
298	int sge_off;
299	int bc;
300	struct svc_rdma_op_ctxt *ctxt;
301
302	BUG_ON(vec->count > RPCSVC_MAXPAGES);
303	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
304		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
305		rmr, (unsigned long long)to, xdr_off,
306		write_len, vec->sge, vec->count);
307
308	ctxt = svc_rdma_get_context(xprt);
309	ctxt->direction = DMA_TO_DEVICE;
310	sge = ctxt->sge;
311
312	/* Find the SGE associated with xdr_off */
313	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
314	     xdr_sge_no++) {
315		if (vec->sge[xdr_sge_no].iov_len > bc)
316			break;
317		bc -= vec->sge[xdr_sge_no].iov_len;
318	}
319
320	sge_off = bc;
321	bc = write_len;
322	sge_no = 0;
323
324	/* Copy the remaining SGE */
325	while (bc != 0) {
326		sge_bytes = min_t(size_t,
327			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
328		sge[sge_no].length = sge_bytes;
329		if (!vec->frmr) {
330			sge[sge_no].addr =
331				dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
332					    sge_bytes, DMA_TO_DEVICE);
333			xdr_off += sge_bytes;
334			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
335						 sge[sge_no].addr))
336				goto err;
337			atomic_inc(&xprt->sc_dma_used);
338			sge[sge_no].lkey = xprt->sc_dma_lkey;
339		} else {
340			sge[sge_no].addr = (unsigned long)
341				vec->sge[xdr_sge_no].iov_base + sge_off;
342			sge[sge_no].lkey = vec->frmr->mr->lkey;
343		}
344		ctxt->count++;
345		ctxt->frmr = vec->frmr;
346		sge_off = 0;
347		sge_no++;
348		xdr_sge_no++;
349		BUG_ON(xdr_sge_no > vec->count);
350		bc -= sge_bytes;
351	}
352
353	/* Prepare WRITE WR */
354	memset(&write_wr, 0, sizeof write_wr);
355	ctxt->wr_op = IB_WR_RDMA_WRITE;
356	write_wr.wr_id = (unsigned long)ctxt;
357	write_wr.sg_list = &sge[0];
358	write_wr.num_sge = sge_no;
359	write_wr.opcode = IB_WR_RDMA_WRITE;
360	write_wr.send_flags = IB_SEND_SIGNALED;
361	write_wr.wr.rdma.rkey = rmr;
362	write_wr.wr.rdma.remote_addr = to;
363
364	/* Post It */
365	atomic_inc(&rdma_stat_write);
366	if (svc_rdma_send(xprt, &write_wr))
367		goto err;
368	return 0;
369 err:
370	svc_rdma_unmap_dma(ctxt);
371	svc_rdma_put_frmr(xprt, vec->frmr);
372	svc_rdma_put_context(ctxt, 0);
373	/* Fatal error, close transport */
374	return -EIO;
375}
376
377static int send_write_chunks(struct svcxprt_rdma *xprt,
378			     struct rpcrdma_msg *rdma_argp,
379			     struct rpcrdma_msg *rdma_resp,
380			     struct svc_rqst *rqstp,
381			     struct svc_rdma_req_map *vec)
382{
383	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
384	int write_len;
385	int max_write;
386	u32 xdr_off;
387	int chunk_off;
388	int chunk_no;
389	struct rpcrdma_write_array *arg_ary;
390	struct rpcrdma_write_array *res_ary;
391	int ret;
392
393	arg_ary = svc_rdma_get_write_array(rdma_argp);
394	if (!arg_ary)
395		return 0;
396	res_ary = (struct rpcrdma_write_array *)
397		&rdma_resp->rm_body.rm_chunks[1];
398
399	if (vec->frmr)
400		max_write = vec->frmr->map_len;
401	else
402		max_write = xprt->sc_max_sge * PAGE_SIZE;
403
404	/* Write chunks start at the pagelist */
405	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
406	     xfer_len && chunk_no < arg_ary->wc_nchunks;
407	     chunk_no++) {
408		struct rpcrdma_segment *arg_ch;
409		u64 rs_offset;
410
411		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
412		write_len = min(xfer_len, arg_ch->rs_length);
413
414		/* Prepare the response chunk given the length actually
415		 * written */
416		rs_offset = get_unaligned(&(arg_ch->rs_offset));
417		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
418					    arg_ch->rs_handle,
419					    rs_offset,
420					    write_len);
421		chunk_off = 0;
422		while (write_len) {
423			int this_write;
424			this_write = min(write_len, max_write);
425			ret = send_write(xprt, rqstp,
426					 arg_ch->rs_handle,
427					 rs_offset + chunk_off,
428					 xdr_off,
429					 this_write,
430					 vec);
431			if (ret) {
432				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
433					ret);
434				return -EIO;
435			}
436			chunk_off += this_write;
437			xdr_off += this_write;
438			xfer_len -= this_write;
439			write_len -= this_write;
440		}
441	}
442	/* Update the req with the number of chunks actually used */
443	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);
444
445	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
446}
447
448static int send_reply_chunks(struct svcxprt_rdma *xprt,
449			     struct rpcrdma_msg *rdma_argp,
450			     struct rpcrdma_msg *rdma_resp,
451			     struct svc_rqst *rqstp,
452			     struct svc_rdma_req_map *vec)
453{
454	u32 xfer_len = rqstp->rq_res.len;
455	int write_len;
456	int max_write;
457	u32 xdr_off;
458	int chunk_no;
459	int chunk_off;
460	struct rpcrdma_segment *ch;
461	struct rpcrdma_write_array *arg_ary;
462	struct rpcrdma_write_array *res_ary;
463	int ret;
464
465	arg_ary = svc_rdma_get_reply_array(rdma_argp);
466	if (!arg_ary)
467		return 0;
 468	/* XXX: need to fix when reply lists occur with read-list and/or
469	 * write-list */
470	res_ary = (struct rpcrdma_write_array *)
471		&rdma_resp->rm_body.rm_chunks[2];
472
473	if (vec->frmr)
474		max_write = vec->frmr->map_len;
475	else
476		max_write = xprt->sc_max_sge * PAGE_SIZE;
477
478	/* xdr offset starts at RPC message */
479	for (xdr_off = 0, chunk_no = 0;
480	     xfer_len && chunk_no < arg_ary->wc_nchunks;
481	     chunk_no++) {
482		u64 rs_offset;
483		ch = &arg_ary->wc_array[chunk_no].wc_target;
484		write_len = min(xfer_len, ch->rs_length);
485
486		/* Prepare the reply chunk given the length actually
487		 * written */
488		rs_offset = get_unaligned(&(ch->rs_offset));
489		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
490					    ch->rs_handle, rs_offset,
491					    write_len);
492		chunk_off = 0;
493		while (write_len) {
494			int this_write;
495
496			this_write = min(write_len, max_write);
497			ret = send_write(xprt, rqstp,
498					 ch->rs_handle,
499					 rs_offset + chunk_off,
500					 xdr_off,
501					 this_write,
502					 vec);
503			if (ret) {
504				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
505					ret);
506				return -EIO;
507			}
508			chunk_off += this_write;
509			xdr_off += this_write;
510			xfer_len -= this_write;
511			write_len -= this_write;
512		}
513	}
514	/* Update the req with the number of chunks actually used */
515	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);
516
517	return rqstp->rq_res.len;
518}
519
520/* This function prepares the portion of the RPCRDMA message to be
521 * sent in the RDMA_SEND. This function is called after data sent via
522 * RDMA has already been transmitted. There are three cases:
523 * - The RPCRDMA header, RPC header, and payload are all sent in a
524 *   single RDMA_SEND. This is the "inline" case.
525 * - The RPCRDMA header and some portion of the RPC header and data
526 *   are sent via this RDMA_SEND and another portion of the data is
527 *   sent via RDMA.
528 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
529 *   header and data are all transmitted via RDMA.
530 * In all three cases, this function prepares the RPCRDMA header in
531 * sge[0], the 'type' parameter indicates the type to place in the
532 * RPCRDMA header, and the 'byte_count' field indicates how much of
533 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
534 * to send is zero in the XDR.
535 */
536static int send_reply(struct svcxprt_rdma *rdma,
537		      struct svc_rqst *rqstp,
538		      struct page *page,
539		      struct rpcrdma_msg *rdma_resp,
540		      struct svc_rdma_op_ctxt *ctxt,
541		      struct svc_rdma_req_map *vec,
542		      int byte_count)
543{
544	struct ib_send_wr send_wr;
545	struct ib_send_wr inv_wr;
546	int sge_no;
547	int sge_bytes;
548	int page_no;
549	int ret;
550
551	/* Post a recv buffer to handle another request. */
552	ret = svc_rdma_post_recv(rdma);
553	if (ret) {
554		printk(KERN_INFO
 555		       "svcrdma: could not post a receive buffer, err=%d. "
556		       "Closing transport %p.\n", ret, rdma);
557		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
558		svc_rdma_put_frmr(rdma, vec->frmr);
559		svc_rdma_put_context(ctxt, 0);
560		return -ENOTCONN;
561	}
562
563	/* Prepare the context */
564	ctxt->pages[0] = page;
565	ctxt->count = 1;
566	ctxt->frmr = vec->frmr;
567	if (vec->frmr)
568		set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
569	else
570		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
571
572	/* Prepare the SGE for the RPCRDMA Header */
573	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
574	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
575	ctxt->sge[0].addr =
576	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
577			    ctxt->sge[0].length, DMA_TO_DEVICE);
578	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
579		goto err;
580	atomic_inc(&rdma->sc_dma_used);
581
582	ctxt->direction = DMA_TO_DEVICE;
583
584	/* Map the payload indicated by 'byte_count' */
585	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
586		int xdr_off = 0;
587		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
588		byte_count -= sge_bytes;
589		if (!vec->frmr) {
590			ctxt->sge[sge_no].addr =
591				dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
592					    sge_bytes, DMA_TO_DEVICE);
593			xdr_off += sge_bytes;
594			if (ib_dma_mapping_error(rdma->sc_cm_id->device,
595						 ctxt->sge[sge_no].addr))
596				goto err;
597			atomic_inc(&rdma->sc_dma_used);
598			ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
599		} else {
600			ctxt->sge[sge_no].addr = (unsigned long)
601				vec->sge[sge_no].iov_base;
602			ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
603		}
604		ctxt->sge[sge_no].length = sge_bytes;
605	}
606	BUG_ON(byte_count != 0);
607
608	/* Save all respages in the ctxt and remove them from the
609	 * respages array. They are our pages until the I/O
610	 * completes.
611	 */
612	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
613		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
614		ctxt->count++;
615		rqstp->rq_respages[page_no] = NULL;
616		/*
617		 * If there are more pages than SGE, terminate SGE
618		 * list so that svc_rdma_unmap_dma doesn't attempt to
619		 * unmap garbage.
620		 */
621		if (page_no+1 >= sge_no)
622			ctxt->sge[page_no+1].length = 0;
623	}
624	BUG_ON(sge_no > rdma->sc_max_sge);
625	memset(&send_wr, 0, sizeof send_wr);
626	ctxt->wr_op = IB_WR_SEND;
627	send_wr.wr_id = (unsigned long)ctxt;
628	send_wr.sg_list = ctxt->sge;
629	send_wr.num_sge = sge_no;
630	send_wr.opcode = IB_WR_SEND;
631	send_wr.send_flags =  IB_SEND_SIGNALED;
632	if (vec->frmr) {
633		/* Prepare INVALIDATE WR */
634		memset(&inv_wr, 0, sizeof inv_wr);
635		inv_wr.opcode = IB_WR_LOCAL_INV;
636		inv_wr.send_flags = IB_SEND_SIGNALED;
637		inv_wr.ex.invalidate_rkey =
638			vec->frmr->mr->lkey;
639		send_wr.next = &inv_wr;
640	}
641
642	ret = svc_rdma_send(rdma, &send_wr);
643	if (ret)
644		goto err;
645
646	return 0;
647
648 err:
649	svc_rdma_unmap_dma(ctxt);
650	svc_rdma_put_frmr(rdma, vec->frmr);
651	svc_rdma_put_context(ctxt, 1);
652	return -EIO;
653}
654
655void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
656{
657}
658
659/*
660 * Return the start of an xdr buffer.
661 */
662static void *xdr_start(struct xdr_buf *xdr)
663{
664	return xdr->head[0].iov_base -
665		(xdr->len -
666		 xdr->page_len -
667		 xdr->tail[0].iov_len -
668		 xdr->head[0].iov_len);
669}
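A worked example of the arithmetic above (values are hypothetical): if rq_arg.len is 1024 while head[0].iov_len is 996 and page_len and tail[0].iov_len are 0, the subtracted quantity works out to 28, the size of the RPC-over-RDMA header that precedes the RPC message in the receive buffer, so xdr_start() returns head[0].iov_base - 28, i.e. the start of that header.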
670
671int svc_rdma_sendto(struct svc_rqst *rqstp)
672{
673	struct svc_xprt *xprt = rqstp->rq_xprt;
674	struct svcxprt_rdma *rdma =
675		container_of(xprt, struct svcxprt_rdma, sc_xprt);
676	struct rpcrdma_msg *rdma_argp;
677	struct rpcrdma_msg *rdma_resp;
678	struct rpcrdma_write_array *reply_ary;
679	enum rpcrdma_proc reply_type;
680	int ret;
681	int inline_bytes;
682	struct page *res_page;
683	struct svc_rdma_op_ctxt *ctxt;
684	struct svc_rdma_req_map *vec;
685
686	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
687
688	/* Get the RDMA request header. */
689	rdma_argp = xdr_start(&rqstp->rq_arg);
690
691	/* Build an req vec for the XDR */
692	ctxt = svc_rdma_get_context(rdma);
693	ctxt->direction = DMA_TO_DEVICE;
694	vec = svc_rdma_get_req_map();
695	ret = map_xdr(rdma, &rqstp->rq_res, vec);
696	if (ret)
697		goto err0;
698	inline_bytes = rqstp->rq_res.len;
699
700	/* Create the RDMA response header */
701	res_page = svc_rdma_get_page();
702	rdma_resp = page_address(res_page);
703	reply_ary = svc_rdma_get_reply_array(rdma_argp);
704	if (reply_ary)
705		reply_type = RDMA_NOMSG;
706	else
707		reply_type = RDMA_MSG;
708	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
709					 rdma_resp, reply_type);
710
711	/* Send any write-chunk data and build resp write-list */
712	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
713				rqstp, vec);
714	if (ret < 0) {
715		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
716		       ret);
717		goto err1;
718	}
719	inline_bytes -= ret;
720
721	/* Send any reply-list data and update resp reply-list */
722	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
723				rqstp, vec);
724	if (ret < 0) {
725		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
726		       ret);
727		goto err1;
728	}
729	inline_bytes -= ret;
730
731	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
732			 inline_bytes);
733	svc_rdma_put_req_map(vec);
734	dprintk("svcrdma: send_reply returns %d\n", ret);
735	return ret;
736
737 err1:
738	put_page(res_page);
739 err0:
740	svc_rdma_put_req_map(vec);
741	svc_rdma_put_context(ctxt, 0);
742	return ret;
743}