v4.6
  1/*
  2 * Copyright (c) 2007 Oracle.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/pagemap.h>
 34#include <linux/slab.h>
 35#include <linux/rbtree.h>
 36#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
 37
 38#include "rds.h"
 39
 40/*
 41 * XXX
 42 *  - build with sparse
 43 *  - should we limit the size of a mr region?  let transport return failure?
 44 *  - should we detect duplicate keys on a socket?  hmm.
 45 *  - an rdma is an mlock, apply rlimit?
 46 */
 47
 48/*
 49 * get the number of pages by looking at the page indices that the start and
 50 * end addresses fall in.
 51 *
 52 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 53 * causes the address to wrap or overflows an unsigned int.  This comes
 54 * from being stored in the 'length' member of 'struct scatterlist'.
 55 */
 56static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
 57{
 58	if ((vec->addr + vec->bytes <= vec->addr) ||
 59	    (vec->bytes > (u64)UINT_MAX))
 60		return 0;
 61
 62	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
 63		(vec->addr >> PAGE_SHIFT);
 64}
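/*
 * Worked example for the calculation above (assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): for vec->addr = 0x1ff8 and vec->bytes = 0x10 the
 * buffer spans bytes [0x1ff8, 0x2008), so
 *   (0x1ff8 + 0x10 + 0xfff) >> 12 = 3   and   0x1ff8 >> 12 = 1,
 * and the function returns 3 - 1 = 2 pages for a 16 byte vec that
 * straddles a page boundary.
 */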
 65
 66static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
 67				       struct rds_mr *insert)
 68{
 69	struct rb_node **p = &root->rb_node;
 70	struct rb_node *parent = NULL;
 71	struct rds_mr *mr;
 72
 73	while (*p) {
 74		parent = *p;
 75		mr = rb_entry(parent, struct rds_mr, r_rb_node);
 76
 77		if (key < mr->r_key)
 78			p = &(*p)->rb_left;
 79		else if (key > mr->r_key)
 80			p = &(*p)->rb_right;
 81		else
 82			return mr;
 83	}
 84
 85	if (insert) {
 86		rb_link_node(&insert->r_rb_node, parent, p);
 87		rb_insert_color(&insert->r_rb_node, root);
 88		atomic_inc(&insert->r_refcount);
 89	}
 90	return NULL;
 91}
 92
 93/*
 94 * Destroy the transport-specific part of a MR.
 95 */
 96static void rds_destroy_mr(struct rds_mr *mr)
 97{
 98	struct rds_sock *rs = mr->r_sock;
 99	void *trans_private = NULL;
100	unsigned long flags;
101
102	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
103			mr->r_key, atomic_read(&mr->r_refcount));
104
105	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
106		return;
107
108	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
109	if (!RB_EMPTY_NODE(&mr->r_rb_node))
110		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
111	trans_private = mr->r_trans_private;
112	mr->r_trans_private = NULL;
113	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
114
115	if (trans_private)
116		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
117}
118
119void __rds_put_mr_final(struct rds_mr *mr)
120{
121	rds_destroy_mr(mr);
122	kfree(mr);
123}
124
125/*
126 * By the time this is called we can't have any more ioctls called on
127 * the socket so we don't need to worry about racing with others.
128 */
129void rds_rdma_drop_keys(struct rds_sock *rs)
130{
131	struct rds_mr *mr;
132	struct rb_node *node;
133	unsigned long flags;
134
135	/* Release any MRs associated with this socket */
136	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
137	while ((node = rb_first(&rs->rs_rdma_keys))) {
138		mr = container_of(node, struct rds_mr, r_rb_node);
139		if (mr->r_trans == rs->rs_transport)
140			mr->r_invalidate = 0;
141		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
142		RB_CLEAR_NODE(&mr->r_rb_node);
143		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
144		rds_destroy_mr(mr);
145		rds_mr_put(mr);
146		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
147	}
148	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
149
150	if (rs->rs_transport && rs->rs_transport->flush_mrs)
151		rs->rs_transport->flush_mrs();
152}
153
154/*
155 * Helper function to pin user pages.
156 */
157static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
158			struct page **pages, int write)
159{
160	int ret;
161
162	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
163
164	if (ret >= 0 && ret < nr_pages) {
165		while (ret--)
166			put_page(pages[ret]);
167		ret = -EFAULT;
168	}
169
170	return ret;
171}
172
173static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
174				u64 *cookie_ret, struct rds_mr **mr_ret)
175{
176	struct rds_mr *mr = NULL, *found;
177	unsigned int nr_pages;
178	struct page **pages = NULL;
179	struct scatterlist *sg;
180	void *trans_private;
181	unsigned long flags;
182	rds_rdma_cookie_t cookie;
183	unsigned int nents;
184	long i;
185	int ret;
186
187	if (rs->rs_bound_addr == 0) {
188		ret = -ENOTCONN; /* XXX not a great errno */
189		goto out;
190	}
191
192	if (!rs->rs_transport->get_mr) {
193		ret = -EOPNOTSUPP;
194		goto out;
195	}
196
197	nr_pages = rds_pages_in_vec(&args->vec);
198	if (nr_pages == 0) {
199		ret = -EINVAL;
200		goto out;
201	}
202
203	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
204		args->vec.addr, args->vec.bytes, nr_pages);
205
206	/* XXX clamp nr_pages to limit the size of this alloc? */
207	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
208	if (!pages) {
209		ret = -ENOMEM;
210		goto out;
211	}
212
213	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
214	if (!mr) {
215		ret = -ENOMEM;
216		goto out;
217	}
218
219	atomic_set(&mr->r_refcount, 1);
220	RB_CLEAR_NODE(&mr->r_rb_node);
221	mr->r_trans = rs->rs_transport;
222	mr->r_sock = rs;
223
224	if (args->flags & RDS_RDMA_USE_ONCE)
225		mr->r_use_once = 1;
226	if (args->flags & RDS_RDMA_INVALIDATE)
227		mr->r_invalidate = 1;
228	if (args->flags & RDS_RDMA_READWRITE)
229		mr->r_write = 1;
230
231	/*
232	 * Pin the pages that make up the user buffer and transfer the page
233	 * pointers to the mr's sg array.  We check to see if we've mapped
234	 * the whole region after transferring the partial page references
235	 * to the sg array so that we can have one page ref cleanup path.
236	 *
237	 * For now we have no flag that tells us whether the mapping is
238	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
239	 * the zero page.
240	 */
241	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
242	if (ret < 0)
243		goto out;
244
245	nents = ret;
246	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
247	if (!sg) {
248		ret = -ENOMEM;
249		goto out;
250	}
251	WARN_ON(!nents);
252	sg_init_table(sg, nents);
253
254	/* Stick all pages into the scatterlist */
255	for (i = 0 ; i < nents; i++)
256		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
257
258	rdsdebug("RDS: trans_private nents is %u\n", nents);
259
260	/* Obtain a transport specific MR. If this succeeds, the
261	 * s/g list is now owned by the MR.
262	 * Note that dma_map() implies that pending writes are
263	 * flushed to RAM, so no dma_sync is needed here. */
264	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
265						 &mr->r_key);
266
267	if (IS_ERR(trans_private)) {
268		for (i = 0 ; i < nents; i++)
269			put_page(sg_page(&sg[i]));
270		kfree(sg);
271		ret = PTR_ERR(trans_private);
272		goto out;
273	}
274
275	mr->r_trans_private = trans_private;
276
277	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
278	       mr->r_key, (void *)(unsigned long) args->cookie_addr);
279
280	/* The user may pass us an unaligned address, but we can only
281	 * map page aligned regions. So we keep the offset, and build
282	 * a 64bit cookie containing <R_Key, offset> and pass that
283	 * around. */
284	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
285	if (cookie_ret)
286		*cookie_ret = cookie;
287
288	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
289		ret = -EFAULT;
290		goto out;
291	}
292
293	/* Inserting the new MR into the rbtree bumps its
294	 * reference count. */
295	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
296	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
297	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
298
299	BUG_ON(found && found != mr);
300
301	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
302	if (mr_ret) {
303		atomic_inc(&mr->r_refcount);
304		*mr_ret = mr;
305	}
306
307	ret = 0;
308out:
309	kfree(pages);
310	if (mr)
311		rds_mr_put(mr);
312	return ret;
313}
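/*
 * The rds_rdma_make_cookie()/rds_rdma_cookie_key()/rds_rdma_cookie_offset()
 * helpers used above come from rds.h.  A minimal sketch of the encoding the
 * code above relies on (assumption: R_Key in the low 32 bits and the page
 * offset in the high 32 bits; the authoritative definitions live in rds.h):
 */
static inline u64 example_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | ((u64)offset << 32);	/* pack <R_Key, offset> */
}

static inline u32 example_rdma_cookie_key(u64 cookie)
{
	return (u32)cookie;			/* recover the R_Key */
}

static inline u32 example_rdma_cookie_offset(u64 cookie)
{
	return (u32)(cookie >> 32);		/* recover the offset into the first page */
}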
314
315int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
316{
317	struct rds_get_mr_args args;
318
319	if (optlen != sizeof(struct rds_get_mr_args))
320		return -EINVAL;
321
322	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
323			   sizeof(struct rds_get_mr_args)))
324		return -EFAULT;
325
326	return __rds_rdma_map(rs, &args, NULL, NULL);
327}
328
329int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
330{
331	struct rds_get_mr_for_dest_args args;
332	struct rds_get_mr_args new_args;
333
334	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
335		return -EINVAL;
336
337	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
338			   sizeof(struct rds_get_mr_for_dest_args)))
339		return -EFAULT;
340
341	/*
342	 * Initially, just behave like get_mr().
343	 * TODO: Implement get_mr as wrapper around this
344	 *	 and deprecate it.
345	 */
346	new_args.vec = args.vec;
347	new_args.cookie_addr = args.cookie_addr;
348	new_args.flags = args.flags;
349
350	return __rds_rdma_map(rs, &new_args, NULL, NULL);
351}
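/*
 * For illustration only: a minimal userspace sketch of exercising
 * rds_get_mr() above.  It assumes the SOL_RDS level and the RDS_GET_MR
 * socket option from <linux/rds.h>; only the rds_get_mr_args fields that
 * the kernel code reads are filled in.
 */
#include <stdint.h>
#include <stddef.h>
#include <sys/socket.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* assumption: socket option level for RDS */
#endif

static int example_get_mr(int rds_fd, void *buf, size_t len, uint64_t *cookie)
{
	struct rds_get_mr_args args = {
		.vec.addr    = (uint64_t)(unsigned long)buf,
		.vec.bytes   = len,
		/* __rds_rdma_map() put_user()s the <R_Key, offset> cookie here */
		.cookie_addr = (uint64_t)(unsigned long)cookie,
		.flags       = RDS_RDMA_USE_ONCE,
	};

	/* rds_get_mr() insists on optlen == sizeof(struct rds_get_mr_args) */
	return setsockopt(rds_fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
}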
352
353/*
354 * Free the MR indicated by the given R_Key
355 */
356int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
357{
358	struct rds_free_mr_args args;
359	struct rds_mr *mr;
360	unsigned long flags;
361
362	if (optlen != sizeof(struct rds_free_mr_args))
363		return -EINVAL;
364
365	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
366			   sizeof(struct rds_free_mr_args)))
367		return -EFAULT;
368
369	/* Special case - a null cookie means flush all unused MRs */
370	if (args.cookie == 0) {
371		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
372			return -EINVAL;
373		rs->rs_transport->flush_mrs();
374		return 0;
375	}
376
377	/* Look up the MR given its R_key and remove it from the rbtree
378	 * so nobody else finds it.
379	 * This should also prevent races with rds_rdma_unuse.
380	 */
381	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
382	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
383	if (mr) {
384		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
385		RB_CLEAR_NODE(&mr->r_rb_node);
386		if (args.flags & RDS_RDMA_INVALIDATE)
387			mr->r_invalidate = 1;
388	}
389	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
390
391	if (!mr)
392		return -EINVAL;
393
394	/*
395	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
396	 * we return.  If we let rds_mr_put() do it it might not happen until
397	 * someone else drops their ref.
398	 */
399	rds_destroy_mr(mr);
400	rds_mr_put(mr);
401	return 0;
402}
403
404/*
405 * This is called when we receive an extension header that
406 * tells us this MR was used. It allows us to implement
407 * use_once semantics
408 */
409void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
410{
411	struct rds_mr *mr;
412	unsigned long flags;
413	int zot_me = 0;
414
415	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
416	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
417	if (!mr) {
418		printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
419		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
420		return;
421	}
422
423	if (mr->r_use_once || force) {
424		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
425		RB_CLEAR_NODE(&mr->r_rb_node);
426		zot_me = 1;
427	}
428	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
429
430	/* May have to issue a dma_sync on this memory region.
431	 * Note we could avoid this if the operation was a RDMA READ,
432	 * but at this point we can't tell. */
433	if (mr->r_trans->sync_mr)
434		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
435
436	/* If the MR was marked as invalidate, this will
437	 * trigger an async flush. */
438	if (zot_me) {
439		rds_destroy_mr(mr);
440		rds_mr_put(mr);
441	}
442}
443
444void rds_rdma_free_op(struct rm_rdma_op *ro)
445{
446	unsigned int i;
447
448	for (i = 0; i < ro->op_nents; i++) {
449		struct page *page = sg_page(&ro->op_sg[i]);
450
451		/* Mark page dirty if it was possibly modified, which
452		 * is the case for a RDMA_READ which copies from remote
453		 * to local memory */
454		if (!ro->op_write) {
455			WARN_ON(!page->mapping && irqs_disabled());
456			set_page_dirty(page);
457		}
458		put_page(page);
459	}
460
461	kfree(ro->op_notifier);
462	ro->op_notifier = NULL;
463	ro->op_active = 0;
464}
465
466void rds_atomic_free_op(struct rm_atomic_op *ao)
467{
468	struct page *page = sg_page(ao->op_sg);
469
470	/* Mark page dirty if it was possibly modified, which
471	 * is the case for a RDMA_READ which copies from remote
472	 * to local memory */
473	set_page_dirty(page);
474	put_page(page);
475
476	kfree(ao->op_notifier);
477	ao->op_notifier = NULL;
478	ao->op_active = 0;
479}
480
481
482/*
483 * Count the number of pages needed to describe an incoming iovec array.
484 */
485static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
486{
487	int tot_pages = 0;
488	unsigned int nr_pages;
489	unsigned int i;
490
491	/* figure out the number of pages in the vector */
492	for (i = 0; i < nr_iovecs; i++) {
493		nr_pages = rds_pages_in_vec(&iov[i]);
494		if (nr_pages == 0)
495			return -EINVAL;
496
497		tot_pages += nr_pages;
498
499		/*
500		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
501		 * so tot_pages cannot overflow without first going negative.
502		 */
503		if (tot_pages < 0)
504			return -EINVAL;
505	}
506
507	return tot_pages;
508}
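/*
 * Why the "tot_pages < 0" test above is sufficient (assuming PAGE_SHIFT == 12
 * and a 32-bit int): rds_pages_in_vec() returns at most
 * (UINT_MAX >> 12) + 1 = 1048576 pages per iovec, so each iteration grows
 * tot_pages by no more than 2^20.  An increment that small cannot wrap a
 * 32-bit signed counter all the way back to a positive value, so any
 * overflow shows up first as a negative tot_pages and is rejected with
 * -EINVAL.
 */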
509
510int rds_rdma_extra_size(struct rds_rdma_args *args)
511{
512	struct rds_iovec vec;
513	struct rds_iovec __user *local_vec;
514	int tot_pages = 0;
515	unsigned int nr_pages;
516	unsigned int i;
517
518	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
519
520	/* figure out the number of pages in the vector */
521	for (i = 0; i < args->nr_local; i++) {
522		if (copy_from_user(&vec, &local_vec[i],
523				   sizeof(struct rds_iovec)))
524			return -EFAULT;
525
526		nr_pages = rds_pages_in_vec(&vec);
527		if (nr_pages == 0)
528			return -EINVAL;
529
530		tot_pages += nr_pages;
531
532		/*
533		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
534		 * so tot_pages cannot overflow without first going negative.
535		 */
536		if (tot_pages < 0)
537			return -EINVAL;
538	}
539
540	return tot_pages * sizeof(struct scatterlist);
541}
542
543/*
544 * The application asks for a RDMA transfer.
545 * Extract all arguments and set up the rdma_op
546 */
547int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
548			  struct cmsghdr *cmsg)
549{
550	struct rds_rdma_args *args;
551	struct rm_rdma_op *op = &rm->rdma;
552	int nr_pages;
553	unsigned int nr_bytes;
554	struct page **pages = NULL;
555	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
556	int iov_size;
557	unsigned int i, j;
558	int ret = 0;
559
560	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
561	    || rm->rdma.op_active)
562		return -EINVAL;
563
564	args = CMSG_DATA(cmsg);
565
566	if (rs->rs_bound_addr == 0) {
567		ret = -ENOTCONN; /* XXX not a great errno */
568		goto out_ret;
569	}
570
571	if (args->nr_local > UIO_MAXIOV) {
572		ret = -EMSGSIZE;
573		goto out_ret;
574	}
575
576	/* Check whether to allocate the iovec area */
577	iov_size = args->nr_local * sizeof(struct rds_iovec);
578	if (args->nr_local > UIO_FASTIOV) {
579		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
580		if (!iovs) {
581			ret = -ENOMEM;
582			goto out_ret;
583		}
584	}
585
586	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
587		ret = -EFAULT;
588		goto out;
589	}
590
591	nr_pages = rds_rdma_pages(iovs, args->nr_local);
592	if (nr_pages < 0) {
593		ret = -EINVAL;
594		goto out;
595	}
596
597	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
598	if (!pages) {
599		ret = -ENOMEM;
600		goto out;
601	}
602
603	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
604	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
605	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
606	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
607	op->op_active = 1;
608	op->op_recverr = rs->rs_recverr;
609	WARN_ON(!nr_pages);
610	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
611	if (!op->op_sg) {
612		ret = -ENOMEM;
613		goto out;
614	}
615
616	if (op->op_notify || op->op_recverr) {
617		/* We allocate an uninitialized notifier here, because
618		 * we don't want to do that in the completion handler. We
619		 * would have to use GFP_ATOMIC there, and don't want to deal
620		 * with failed allocations.
621		 */
622		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
623		if (!op->op_notifier) {
624			ret = -ENOMEM;
625			goto out;
626		}
627		op->op_notifier->n_user_token = args->user_token;
628		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
629	}
630
631	/* The cookie contains the R_Key of the remote memory region, and
632	 * optionally an offset into it. This is how we implement RDMA into
633	 * unaligned memory.
634	 * When setting up the RDMA, we need to add that offset to the
635	 * destination address (which is really an offset into the MR)
636	 * FIXME: We may want to move this into ib_rdma.c
637	 */
638	op->op_rkey = rds_rdma_cookie_key(args->cookie);
639	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
640
641	nr_bytes = 0;
642
643	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
644	       (unsigned long long)args->nr_local,
645	       (unsigned long long)args->remote_vec.addr,
646	       op->op_rkey);
647
648	for (i = 0; i < args->nr_local; i++) {
649		struct rds_iovec *iov = &iovs[i];
650		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
651		unsigned int nr = rds_pages_in_vec(iov);
652
653		rs->rs_user_addr = iov->addr;
654		rs->rs_user_bytes = iov->bytes;
655
656		/* If it's a WRITE operation, we want to pin the pages for reading.
657		 * If it's a READ operation, we need to pin the pages for writing.
658		 */
659		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
660		if (ret < 0)
661			goto out;
662		else
663			ret = 0;
664
665		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
666			 nr_bytes, nr, iov->bytes, iov->addr);
667
668		nr_bytes += iov->bytes;
669
670		for (j = 0; j < nr; j++) {
671			unsigned int offset = iov->addr & ~PAGE_MASK;
672			struct scatterlist *sg;
673
674			sg = &op->op_sg[op->op_nents + j];
675			sg_set_page(sg, pages[j],
676					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
677					offset);
678
679			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
680			       sg->offset, sg->length, iov->addr, iov->bytes);
681
682			iov->addr += sg->length;
683			iov->bytes -= sg->length;
684		}
685
686		op->op_nents += nr;
687	}
688
689	if (nr_bytes > args->remote_vec.bytes) {
690		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
691				nr_bytes,
692				(unsigned int) args->remote_vec.bytes);
693		ret = -EINVAL;
694		goto out;
695	}
696	op->op_bytes = nr_bytes;
697
698out:
699	if (iovs != iovstack)
700		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
701	kfree(pages);
702out_ret:
703	if (ret)
704		rds_rdma_free_op(op);
705	else
706		rds_stats_inc(s_send_rdma);
707
708	return ret;
709}
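/*
 * For illustration only: a minimal userspace sketch of the sendmsg() side
 * that feeds rds_cmsg_rdma_args() above.  The SOL_RDS level and the
 * RDS_CMSG_RDMA_ARGS cmsg type are assumed to come from <linux/rds.h>;
 * only the rds_rdma_args fields that the kernel code reads are populated.
 */
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/rds.h>

static ssize_t example_rdma_write(int rds_fd, struct sockaddr *dest, socklen_t dlen,
				  uint64_t cookie, struct rds_iovec *local,
				  uint64_t nr_local, uint64_t remote_addr,
				  uint64_t remote_bytes)
{
	struct rds_rdma_args rargs = {
		.cookie		  = cookie,	/* <R_Key, offset> obtained from the peer */
		.remote_vec.addr  = remote_addr,
		.remote_vec.bytes = remote_bytes,
		.local_vec_addr	  = (uint64_t)(unsigned long)local,
		.nr_local	  = nr_local,	/* capped at UIO_MAXIOV by the kernel */
		.flags		  = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
		.user_token	  = 1,		/* echoed back in the completion notification */
	};
	char cbuf[CMSG_SPACE(sizeof(rargs))];
	struct msghdr msg = {
		.msg_name	= dest,
		.msg_namelen	= dlen,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type	 = RDS_CMSG_RDMA_ARGS;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(rargs));
	memcpy(CMSG_DATA(cmsg), &rargs, sizeof(rargs));

	return sendmsg(rds_fd, &msg, 0);
}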
710
711/*
712 * The application wants us to pass an RDMA destination (aka MR)
713 * to the remote
714 */
715int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
716			  struct cmsghdr *cmsg)
717{
718	unsigned long flags;
719	struct rds_mr *mr;
720	u32 r_key;
721	int err = 0;
722
723	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
724	    rm->m_rdma_cookie != 0)
725		return -EINVAL;
726
727	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
728
729	/* We are reusing a previously mapped MR here. Most likely, the
730	 * application has written to the buffer, so we need to explicitly
731	 * flush those writes to RAM. Otherwise the HCA may not see them
732	 * when doing a DMA from that buffer.
733	 */
734	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
735
736	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
737	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
738	if (!mr)
739		err = -EINVAL;	/* invalid r_key */
740	else
741		atomic_inc(&mr->r_refcount);
742	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
743
744	if (mr) {
745		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
746		rm->rdma.op_rdma_mr = mr;
747	}
748	return err;
749}
750
751/*
752 * The application passes us an address range it wants to enable RDMA
753 * to/from. We map the area, and save the <R_Key,offset> pair
754 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
755 * in an extension header.
756 */
757int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
758			  struct cmsghdr *cmsg)
759{
760	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
761	    rm->m_rdma_cookie != 0)
762		return -EINVAL;
763
764	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
765}
766
767/*
768 * Fill in rds_message for an atomic request.
769 */
770int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
771		    struct cmsghdr *cmsg)
772{
773	struct page *page = NULL;
774	struct rds_atomic_args *args;
775	int ret = 0;
776
777	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
778	 || rm->atomic.op_active)
779		return -EINVAL;
780
781	args = CMSG_DATA(cmsg);
782
783	/* Nonmasked & masked cmsg ops converted to masked hw ops */
784	switch (cmsg->cmsg_type) {
785	case RDS_CMSG_ATOMIC_FADD:
786		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
787		rm->atomic.op_m_fadd.add = args->fadd.add;
788		rm->atomic.op_m_fadd.nocarry_mask = 0;
789		break;
790	case RDS_CMSG_MASKED_ATOMIC_FADD:
791		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
792		rm->atomic.op_m_fadd.add = args->m_fadd.add;
793		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
794		break;
795	case RDS_CMSG_ATOMIC_CSWP:
796		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
797		rm->atomic.op_m_cswp.compare = args->cswp.compare;
798		rm->atomic.op_m_cswp.swap = args->cswp.swap;
799		rm->atomic.op_m_cswp.compare_mask = ~0;
800		rm->atomic.op_m_cswp.swap_mask = ~0;
801		break;
802	case RDS_CMSG_MASKED_ATOMIC_CSWP:
803		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
804		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
805		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
806		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
807		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
808		break;
809	default:
810		BUG(); /* should never happen */
811	}
812
813	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
814	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
815	rm->atomic.op_active = 1;
816	rm->atomic.op_recverr = rs->rs_recverr;
817	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
818	if (!rm->atomic.op_sg) {
819		ret = -ENOMEM;
820		goto err;
821	}
822
823	/* verify 8 byte-aligned */
824	if (args->local_addr & 0x7) {
825		ret = -EFAULT;
826		goto err;
827	}
828
829	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
830	if (ret != 1)
831		goto err;
832	ret = 0;
833
834	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
835
836	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
837		/* We allocate an uninitialized notifier here, because
838		 * we don't want to do that in the completion handler. We
839		 * would have to use GFP_ATOMIC there, and don't want to deal
840		 * with failed allocations.
841		 */
842		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
843		if (!rm->atomic.op_notifier) {
844			ret = -ENOMEM;
845			goto err;
846		}
847
848		rm->atomic.op_notifier->n_user_token = args->user_token;
849		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
850	}
851
852	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
853	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
854
855	return ret;
856err:
857	if (page)
858		put_page(page);
859	kfree(rm->atomic.op_notifier);
860
861	return ret;
862}
v6.2
  1/*
  2 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/pagemap.h>
 34#include <linux/slab.h>
 35#include <linux/rbtree.h>
 36#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
 37
 38#include "rds.h"
 39
 40/*
 41 * XXX
 42 *  - build with sparse
 43 *  - should we detect duplicate keys on a socket?  hmm.
 44 *  - an rdma is an mlock, apply rlimit?
 45 */
 46
 47/*
 48 * get the number of pages by looking at the page indices that the start and
 49 * end addresses fall in.
 50 *
 51 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 52 * causes the address to wrap or overflows an unsigned int.  This comes
 53 * from being stored in the 'length' member of 'struct scatterlist'.
 54 */
 55static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
 56{
 57	if ((vec->addr + vec->bytes <= vec->addr) ||
 58	    (vec->bytes > (u64)UINT_MAX))
 59		return 0;
 60
 61	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
 62		(vec->addr >> PAGE_SHIFT);
 63}
 64
 65static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
 66				       struct rds_mr *insert)
 67{
 68	struct rb_node **p = &root->rb_node;
 69	struct rb_node *parent = NULL;
 70	struct rds_mr *mr;
 71
 72	while (*p) {
 73		parent = *p;
 74		mr = rb_entry(parent, struct rds_mr, r_rb_node);
 75
 76		if (key < mr->r_key)
 77			p = &(*p)->rb_left;
 78		else if (key > mr->r_key)
 79			p = &(*p)->rb_right;
 80		else
 81			return mr;
 82	}
 83
 84	if (insert) {
 85		rb_link_node(&insert->r_rb_node, parent, p);
 86		rb_insert_color(&insert->r_rb_node, root);
 87		kref_get(&insert->r_kref);
 88	}
 89	return NULL;
 90}
 91
 92/*
 93 * Destroy the transport-specific part of a MR.
 94 */
 95static void rds_destroy_mr(struct rds_mr *mr)
 96{
 97	struct rds_sock *rs = mr->r_sock;
 98	void *trans_private = NULL;
 99	unsigned long flags;
100
101	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
102		 mr->r_key, kref_read(&mr->r_kref));
103
104	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
105	if (!RB_EMPTY_NODE(&mr->r_rb_node))
106		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
107	trans_private = mr->r_trans_private;
108	mr->r_trans_private = NULL;
109	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
110
111	if (trans_private)
112		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
113}
114
115void __rds_put_mr_final(struct kref *kref)
116{
117	struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);
118
119	rds_destroy_mr(mr);
120	kfree(mr);
121}
122
123/*
124 * By the time this is called we can't have any more ioctls called on
125 * the socket so we don't need to worry about racing with others.
126 */
127void rds_rdma_drop_keys(struct rds_sock *rs)
128{
129	struct rds_mr *mr;
130	struct rb_node *node;
131	unsigned long flags;
132
133	/* Release any MRs associated with this socket */
134	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
135	while ((node = rb_first(&rs->rs_rdma_keys))) {
136		mr = rb_entry(node, struct rds_mr, r_rb_node);
137		if (mr->r_trans == rs->rs_transport)
138			mr->r_invalidate = 0;
139		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
140		RB_CLEAR_NODE(&mr->r_rb_node);
141		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
142		kref_put(&mr->r_kref, __rds_put_mr_final);
143		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
144	}
145	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
146
147	if (rs->rs_transport && rs->rs_transport->flush_mrs)
148		rs->rs_transport->flush_mrs();
149}
150
151/*
152 * Helper function to pin user pages.
153 */
154static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
155			struct page **pages, int write)
156{
157	unsigned int gup_flags = FOLL_LONGTERM;
158	int ret;
159
160	if (write)
161		gup_flags |= FOLL_WRITE;
162
163	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
164	if (ret >= 0 && ret < nr_pages) {
165		unpin_user_pages(pages, ret);
166		ret = -EFAULT;
167	}
168
169	return ret;
170}
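/*
 * Compared with the v4.6 variant earlier, pages are now taken with
 * pin_user_pages_fast() and FOLL_LONGTERM, so every successful pin must be
 * dropped through the unpin_user_page*() family rather than put_page().
 * A minimal sketch of that pairing (kernel context assumed; partial pins
 * are handled by rds_pin_pages() above):
 */
static int example_pin_then_release(unsigned long uaddr, unsigned int nr_pages,
				    struct page **pages, bool wrote_to_pages)
{
	int pinned = pin_user_pages_fast(uaddr, nr_pages,
					 FOLL_WRITE | FOLL_LONGTERM, pages);

	if (pinned < 0)
		return pinned;

	/* ... hand the pages to the transport and let the DMA run ... */

	/* dirty + unpin in one call, as rds_rdma_free_op() does below */
	unpin_user_pages_dirty_lock(pages, pinned, wrote_to_pages);
	return 0;
}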
171
172static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
173			  u64 *cookie_ret, struct rds_mr **mr_ret,
174			  struct rds_conn_path *cp)
175{
176	struct rds_mr *mr = NULL, *found;
177	struct scatterlist *sg = NULL;
178	unsigned int nr_pages;
179	struct page **pages = NULL;
180	void *trans_private;
181	unsigned long flags;
182	rds_rdma_cookie_t cookie;
183	unsigned int nents = 0;
184	int need_odp = 0;
185	long i;
186	int ret;
187
188	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
189		ret = -ENOTCONN; /* XXX not a great errno */
190		goto out;
191	}
192
193	if (!rs->rs_transport->get_mr) {
194		ret = -EOPNOTSUPP;
195		goto out;
196	}
197
198	/* If the combination of the addr and size requested for this memory
199	 * region causes an integer overflow, return error.
200	 */
201	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
202	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
203		    (args->vec.addr + args->vec.bytes)) {
204		ret = -EINVAL;
205		goto out;
206	}
207
208	if (!can_do_mlock()) {
209		ret = -EPERM;
210		goto out;
211	}
212
213	nr_pages = rds_pages_in_vec(&args->vec);
214	if (nr_pages == 0) {
215		ret = -EINVAL;
216		goto out;
217	}
218
219	/* Restrict the size of mr irrespective of underlying transport
220	 * To account for unaligned mr regions, subtract one from nr_pages
221	 */
222	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
223		ret = -EMSGSIZE;
224		goto out;
225	}
226
227	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
228		args->vec.addr, args->vec.bytes, nr_pages);
229
230	/* XXX clamp nr_pages to limit the size of this alloc? */
231	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
232	if (!pages) {
233		ret = -ENOMEM;
234		goto out;
235	}
236
237	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
238	if (!mr) {
239		ret = -ENOMEM;
240		goto out;
241	}
242
243	kref_init(&mr->r_kref);
244	RB_CLEAR_NODE(&mr->r_rb_node);
245	mr->r_trans = rs->rs_transport;
246	mr->r_sock = rs;
247
248	if (args->flags & RDS_RDMA_USE_ONCE)
249		mr->r_use_once = 1;
250	if (args->flags & RDS_RDMA_INVALIDATE)
251		mr->r_invalidate = 1;
252	if (args->flags & RDS_RDMA_READWRITE)
253		mr->r_write = 1;
254
255	/*
256	 * Pin the pages that make up the user buffer and transfer the page
257	 * pointers to the mr's sg array.  We check to see if we've mapped
258	 * the whole region after transferring the partial page references
259	 * to the sg array so that we can have one page ref cleanup path.
260	 *
261	 * For now we have no flag that tells us whether the mapping is
262	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
263	 * the zero page.
264	 */
265	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
266	if (ret == -EOPNOTSUPP) {
267		need_odp = 1;
268	} else if (ret <= 0) {
269		goto out;
270	} else {
271		nents = ret;
272		sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
273		if (!sg) {
274			ret = -ENOMEM;
275			goto out;
276		}
277		WARN_ON(!nents);
278		sg_init_table(sg, nents);
279
280		/* Stick all pages into the scatterlist */
281		for (i = 0 ; i < nents; i++)
282			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
283
284		rdsdebug("RDS: trans_private nents is %u\n", nents);
285	}
286	/* Obtain a transport specific MR. If this succeeds, the
287	 * s/g list is now owned by the MR.
288	 * Note that dma_map() implies that pending writes are
289	 * flushed to RAM, so no dma_sync is needed here. */
290	trans_private = rs->rs_transport->get_mr(
291		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
292		args->vec.addr, args->vec.bytes,
293		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);
294
295	if (IS_ERR(trans_private)) {
296		/* In ODP case, we don't GUP pages, so don't need
297		 * to release anything.
298		 */
299		if (!need_odp) {
300			unpin_user_pages(pages, nr_pages);
301			kfree(sg);
302		}
303		ret = PTR_ERR(trans_private);
304		goto out;
305	}
306
307	mr->r_trans_private = trans_private;
308
309	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
310	       mr->r_key, (void *)(unsigned long) args->cookie_addr);
311
312	/* The user may pass us an unaligned address, but we can only
313	 * map page aligned regions. So we keep the offset, and build
314	 * a 64bit cookie containing <R_Key, offset> and pass that
315	 * around. */
316	if (need_odp)
317		cookie = rds_rdma_make_cookie(mr->r_key, 0);
318	else
319		cookie = rds_rdma_make_cookie(mr->r_key,
320					      args->vec.addr & ~PAGE_MASK);
321	if (cookie_ret)
322		*cookie_ret = cookie;
323
324	if (args->cookie_addr &&
325	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
326		if (!need_odp) {
327			unpin_user_pages(pages, nr_pages);
328			kfree(sg);
329		}
330		ret = -EFAULT;
331		goto out;
332	}
333
334	/* Inserting the new MR into the rbtree bumps its
335	 * reference count. */
336	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
337	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
338	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
339
340	BUG_ON(found && found != mr);
341
342	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
343	if (mr_ret) {
344		kref_get(&mr->r_kref);
345		*mr_ret = mr;
346	}
347
348	ret = 0;
349out:
350	kfree(pages);
351	if (mr)
352		kref_put(&mr->r_kref, __rds_put_mr_final);
353	return ret;
354}
355
356int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
357{
358	struct rds_get_mr_args args;
359
360	if (optlen != sizeof(struct rds_get_mr_args))
361		return -EINVAL;
362
363	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
364		return -EFAULT;
365
366	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
367}
368
369int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
370{
371	struct rds_get_mr_for_dest_args args;
372	struct rds_get_mr_args new_args;
373
374	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
375		return -EINVAL;
376
377	if (copy_from_sockptr(&args, optval,
378			   sizeof(struct rds_get_mr_for_dest_args)))
379		return -EFAULT;
380
381	/*
382	 * Initially, just behave like get_mr().
383	 * TODO: Implement get_mr as wrapper around this
384	 *	 and deprecate it.
385	 */
386	new_args.vec = args.vec;
387	new_args.cookie_addr = args.cookie_addr;
388	new_args.flags = args.flags;
389
390	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
391}
392
393/*
394 * Free the MR indicated by the given R_Key
395 */
396int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
397{
398	struct rds_free_mr_args args;
399	struct rds_mr *mr;
400	unsigned long flags;
401
402	if (optlen != sizeof(struct rds_free_mr_args))
403		return -EINVAL;
404
405	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
406		return -EFAULT;
407
408	/* Special case - a null cookie means flush all unused MRs */
409	if (args.cookie == 0) {
410		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
411			return -EINVAL;
412		rs->rs_transport->flush_mrs();
413		return 0;
414	}
415
416	/* Look up the MR given its R_key and remove it from the rbtree
417	 * so nobody else finds it.
418	 * This should also prevent races with rds_rdma_unuse.
419	 */
420	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
421	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
422	if (mr) {
423		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
424		RB_CLEAR_NODE(&mr->r_rb_node);
425		if (args.flags & RDS_RDMA_INVALIDATE)
426			mr->r_invalidate = 1;
427	}
428	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
429
430	if (!mr)
431		return -EINVAL;
432
433	kref_put(&mr->r_kref, __rds_put_mr_final);
434	return 0;
435}
436
437/*
438 * This is called when we receive an extension header that
439 * tells us this MR was used. It allows us to implement
440 * use_once semantics
441 */
442void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
443{
444	struct rds_mr *mr;
445	unsigned long flags;
446	int zot_me = 0;
447
448	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
449	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
450	if (!mr) {
451		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
452			 r_key);
453		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
454		return;
455	}
456
457	/* Get a reference so that the MR won't go away before calling
458	 * sync_mr() below.
459	 */
460	kref_get(&mr->r_kref);
461
462	/* If it is going to be freed, remove it from the tree now so
463	 * that no other thread can find it and free it.
464	 */
465	if (mr->r_use_once || force) {
466		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
467		RB_CLEAR_NODE(&mr->r_rb_node);
468		zot_me = 1;
469	}
470	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
471
472	/* May have to issue a dma_sync on this memory region.
473	 * Note we could avoid this if the operation was a RDMA READ,
474	 * but at this point we can't tell. */
475	if (mr->r_trans->sync_mr)
476		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
477
478	/* Release the reference held above. */
479	kref_put(&mr->r_kref, __rds_put_mr_final);
480
481	/* If the MR was marked as invalidate, this will
482	 * trigger an async flush. */
483	if (zot_me)
484		kref_put(&mr->r_kref, __rds_put_mr_final);
485}
486
487void rds_rdma_free_op(struct rm_rdma_op *ro)
488{
489	unsigned int i;
490
491	if (ro->op_odp_mr) {
492		kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
493	} else {
494		for (i = 0; i < ro->op_nents; i++) {
495			struct page *page = sg_page(&ro->op_sg[i]);
496
497			/* Mark page dirty if it was possibly modified, which
498			 * is the case for a RDMA_READ which copies from remote
499			 * to local memory
500			 */
501			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
502		}
503	}
504
505	kfree(ro->op_notifier);
506	ro->op_notifier = NULL;
507	ro->op_active = 0;
508	ro->op_odp_mr = NULL;
509}
510
511void rds_atomic_free_op(struct rm_atomic_op *ao)
512{
513	struct page *page = sg_page(ao->op_sg);
514
515	/* Mark page dirty if it was possibly modified, which
516	 * is the case for a RDMA_READ which copies from remote
517	 * to local memory */
518	unpin_user_pages_dirty_lock(&page, 1, true);
519
520	kfree(ao->op_notifier);
521	ao->op_notifier = NULL;
522	ao->op_active = 0;
523}
524
525
526/*
527 * Count the number of pages needed to describe an incoming iovec array.
528 */
529static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
530{
531	int tot_pages = 0;
532	unsigned int nr_pages;
533	unsigned int i;
534
535	/* figure out the number of pages in the vector */
536	for (i = 0; i < nr_iovecs; i++) {
537		nr_pages = rds_pages_in_vec(&iov[i]);
538		if (nr_pages == 0)
539			return -EINVAL;
540
541		tot_pages += nr_pages;
542
543		/*
544		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
545		 * so tot_pages cannot overflow without first going negative.
546		 */
547		if (tot_pages < 0)
548			return -EINVAL;
549	}
550
551	return tot_pages;
552}
553
554int rds_rdma_extra_size(struct rds_rdma_args *args,
555			struct rds_iov_vector *iov)
556{
557	struct rds_iovec *vec;
558	struct rds_iovec __user *local_vec;
559	int tot_pages = 0;
560	unsigned int nr_pages;
561	unsigned int i;
562
563	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
564
565	if (args->nr_local == 0)
566		return -EINVAL;
567
568	if (args->nr_local > UIO_MAXIOV)
569		return -EMSGSIZE;
570
571	iov->iov = kcalloc(args->nr_local,
572			   sizeof(struct rds_iovec),
573			   GFP_KERNEL);
574	if (!iov->iov)
575		return -ENOMEM;
576
577	vec = &iov->iov[0];
578
579	if (copy_from_user(vec, local_vec, args->nr_local *
580			   sizeof(struct rds_iovec)))
581		return -EFAULT;
582	iov->len = args->nr_local;
583
584	/* figure out the number of pages in the vector */
585	for (i = 0; i < args->nr_local; i++, vec++) {
586
587		nr_pages = rds_pages_in_vec(vec);
588		if (nr_pages == 0)
589			return -EINVAL;
590
591		tot_pages += nr_pages;
592
593		/*
594		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
595		 * so tot_pages cannot overflow without first going negative.
596		 */
597		if (tot_pages < 0)
598			return -EINVAL;
599	}
600
601	return tot_pages * sizeof(struct scatterlist);
602}
603
604/*
605 * The application asks for a RDMA transfer.
606 * Extract all arguments and set up the rdma_op
607 */
608int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
609		       struct cmsghdr *cmsg,
610		       struct rds_iov_vector *vec)
611{
612	struct rds_rdma_args *args;
613	struct rm_rdma_op *op = &rm->rdma;
614	int nr_pages;
615	unsigned int nr_bytes;
616	struct page **pages = NULL;
617	struct rds_iovec *iovs;
618	unsigned int i, j;
619	int ret = 0;
620	bool odp_supported = true;
621
622	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
623	    || rm->rdma.op_active)
624		return -EINVAL;
625
626	args = CMSG_DATA(cmsg);
627
628	if (ipv6_addr_any(&rs->rs_bound_addr)) {
629		ret = -ENOTCONN; /* XXX not a great errno */
630		goto out_ret;
631	}
632
633	if (args->nr_local > UIO_MAXIOV) {
634		ret = -EMSGSIZE;
635		goto out_ret;
636	}
637
638	if (vec->len != args->nr_local) {
639		ret = -EINVAL;
640		goto out_ret;
641	}
642	/* odp-mr is not supported for multiple requests within one message */
643	if (args->nr_local != 1)
644		odp_supported = false;
645
646	iovs = vec->iov;
647
648	nr_pages = rds_rdma_pages(iovs, args->nr_local);
649	if (nr_pages < 0) {
650		ret = -EINVAL;
651		goto out_ret;
652	}
653
654	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
655	if (!pages) {
656		ret = -ENOMEM;
657		goto out_ret;
658	}
659
660	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
661	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
662	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
663	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
664	op->op_active = 1;
665	op->op_recverr = rs->rs_recverr;
666	op->op_odp_mr = NULL;
667
668	WARN_ON(!nr_pages);
669	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
670	if (IS_ERR(op->op_sg)) {
671		ret = PTR_ERR(op->op_sg);
672		goto out_pages;
673	}
674
675	if (op->op_notify || op->op_recverr) {
676		/* We allocate an uninitialized notifier here, because
677		 * we don't want to do that in the completion handler. We
678		 * would have to use GFP_ATOMIC there, and don't want to deal
679		 * with failed allocations.
680		 */
681		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
682		if (!op->op_notifier) {
683			ret = -ENOMEM;
684			goto out_pages;
685		}
686		op->op_notifier->n_user_token = args->user_token;
687		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
688	}
689
690	/* The cookie contains the R_Key of the remote memory region, and
691	 * optionally an offset into it. This is how we implement RDMA into
692	 * unaligned memory.
693	 * When setting up the RDMA, we need to add that offset to the
694	 * destination address (which is really an offset into the MR)
695	 * FIXME: We may want to move this into ib_rdma.c
696	 */
697	op->op_rkey = rds_rdma_cookie_key(args->cookie);
698	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
699
700	nr_bytes = 0;
701
702	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
703	       (unsigned long long)args->nr_local,
704	       (unsigned long long)args->remote_vec.addr,
705	       op->op_rkey);
706
707	for (i = 0; i < args->nr_local; i++) {
708		struct rds_iovec *iov = &iovs[i];
709		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
710		unsigned int nr = rds_pages_in_vec(iov);
711
712		rs->rs_user_addr = iov->addr;
713		rs->rs_user_bytes = iov->bytes;
714
715		/* If it's a WRITE operation, we want to pin the pages for reading.
716		 * If it's a READ operation, we need to pin the pages for writing.
717		 */
718		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
719		if ((!odp_supported && ret <= 0) ||
720		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
721			goto out_pages;
722
723		if (ret == -EOPNOTSUPP) {
724			struct rds_mr *local_odp_mr;
725
726			if (!rs->rs_transport->get_mr) {
727				ret = -EOPNOTSUPP;
728				goto out_pages;
729			}
730			local_odp_mr =
731				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
732			if (!local_odp_mr) {
733				ret = -ENOMEM;
734				goto out_pages;
735			}
736			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
737			kref_init(&local_odp_mr->r_kref);
738			local_odp_mr->r_trans = rs->rs_transport;
739			local_odp_mr->r_sock = rs;
740			local_odp_mr->r_trans_private =
741				rs->rs_transport->get_mr(
742					NULL, 0, rs, &local_odp_mr->r_key, NULL,
743					iov->addr, iov->bytes, ODP_VIRTUAL);
744			if (IS_ERR(local_odp_mr->r_trans_private)) {
745				ret = PTR_ERR(local_odp_mr->r_trans_private);
746				rdsdebug("get_mr ret %d %p\"", ret,
747					 local_odp_mr->r_trans_private);
748				kfree(local_odp_mr);
749				ret = -EOPNOTSUPP;
750				goto out_pages;
751			}
752			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
753				 local_odp_mr, local_odp_mr->r_trans_private);
754			op->op_odp_mr = local_odp_mr;
755			op->op_odp_addr = iov->addr;
756		}
757
758		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
759			 nr_bytes, nr, iov->bytes, iov->addr);
760
761		nr_bytes += iov->bytes;
762
763		for (j = 0; j < nr; j++) {
764			unsigned int offset = iov->addr & ~PAGE_MASK;
765			struct scatterlist *sg;
766
767			sg = &op->op_sg[op->op_nents + j];
768			sg_set_page(sg, pages[j],
769					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
770					offset);
771
772			sg_dma_len(sg) = sg->length;
773			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
774			       sg->offset, sg->length, iov->addr, iov->bytes);
775
776			iov->addr += sg->length;
777			iov->bytes -= sg->length;
778		}
779
780		op->op_nents += nr;
781	}
782
783	if (nr_bytes > args->remote_vec.bytes) {
784		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
785				nr_bytes,
786				(unsigned int) args->remote_vec.bytes);
787		ret = -EINVAL;
788		goto out_pages;
789	}
790	op->op_bytes = nr_bytes;
791	ret = 0;
792
793out_pages:
794	kfree(pages);
795out_ret:
796	if (ret)
797		rds_rdma_free_op(op);
798	else
799		rds_stats_inc(s_send_rdma);
800
801	return ret;
802}
803
804/*
805 * The application wants us to pass an RDMA destination (aka MR)
806 * to the remote
807 */
808int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
809			  struct cmsghdr *cmsg)
810{
811	unsigned long flags;
812	struct rds_mr *mr;
813	u32 r_key;
814	int err = 0;
815
816	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
817	    rm->m_rdma_cookie != 0)
818		return -EINVAL;
819
820	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
821
822	/* We are reusing a previously mapped MR here. Most likely, the
823	 * application has written to the buffer, so we need to explicitly
824	 * flush those writes to RAM. Otherwise the HCA may not see them
825	 * when doing a DMA from that buffer.
826	 */
827	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);
828
829	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
830	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
831	if (!mr)
832		err = -EINVAL;	/* invalid r_key */
833	else
834		kref_get(&mr->r_kref);
835	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
836
837	if (mr) {
838		mr->r_trans->sync_mr(mr->r_trans_private,
839				     DMA_TO_DEVICE);
840		rm->rdma.op_rdma_mr = mr;
841	}
842	return err;
843}
844
845/*
846 * The application passes us an address range it wants to enable RDMA
847 * to/from. We map the area, and save the <R_Key,offset> pair
848 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
849 * in an extension header.
850 */
851int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
852			  struct cmsghdr *cmsg)
853{
854	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
855	    rm->m_rdma_cookie != 0)
856		return -EINVAL;
857
858	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
859			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
860}
861
862/*
863 * Fill in rds_message for an atomic request.
864 */
865int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
866		    struct cmsghdr *cmsg)
867{
868	struct page *page = NULL;
869	struct rds_atomic_args *args;
870	int ret = 0;
871
872	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
873	 || rm->atomic.op_active)
874		return -EINVAL;
875
876	args = CMSG_DATA(cmsg);
877
878	/* Nonmasked & masked cmsg ops converted to masked hw ops */
879	switch (cmsg->cmsg_type) {
880	case RDS_CMSG_ATOMIC_FADD:
881		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
882		rm->atomic.op_m_fadd.add = args->fadd.add;
883		rm->atomic.op_m_fadd.nocarry_mask = 0;
884		break;
885	case RDS_CMSG_MASKED_ATOMIC_FADD:
886		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
887		rm->atomic.op_m_fadd.add = args->m_fadd.add;
888		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
889		break;
890	case RDS_CMSG_ATOMIC_CSWP:
891		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
892		rm->atomic.op_m_cswp.compare = args->cswp.compare;
893		rm->atomic.op_m_cswp.swap = args->cswp.swap;
894		rm->atomic.op_m_cswp.compare_mask = ~0;
895		rm->atomic.op_m_cswp.swap_mask = ~0;
896		break;
897	case RDS_CMSG_MASKED_ATOMIC_CSWP:
898		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
899		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
900		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
901		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
902		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
903		break;
904	default:
905		BUG(); /* should never happen */
906	}
907
908	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
909	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
910	rm->atomic.op_active = 1;
911	rm->atomic.op_recverr = rs->rs_recverr;
912	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
913	if (IS_ERR(rm->atomic.op_sg)) {
914		ret = PTR_ERR(rm->atomic.op_sg);
915		goto err;
916	}
917
918	/* verify 8 byte-aligned */
919	if (args->local_addr & 0x7) {
920		ret = -EFAULT;
921		goto err;
922	}
923
924	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
925	if (ret != 1)
926		goto err;
927	ret = 0;
928
929	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
930
931	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
932		/* We allocate an uninitialized notifier here, because
933		 * we don't want to do that in the completion handler. We
934		 * would have to use GFP_ATOMIC there, and don't want to deal
935		 * with failed allocations.
936		 */
937		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
938		if (!rm->atomic.op_notifier) {
939			ret = -ENOMEM;
940			goto err;
941		}
942
943		rm->atomic.op_notifier->n_user_token = args->user_token;
944		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
945	}
946
947	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
948	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
949
950	return ret;
951err:
952	if (page)
953		unpin_user_page(page);
954	rm->atomic.op_active = 0;
955	kfree(rm->atomic.op_notifier);
956
957	return ret;
958}
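/*
 * For illustration only: a minimal userspace sketch of driving
 * rds_cmsg_atomic() above with a compare-and-swap.  The SOL_RDS level and
 * the RDS_CMSG_ATOMIC_CSWP cmsg type are assumed to come from
 * <linux/rds.h>; only fields the kernel code above reads are filled in,
 * and local_addr must be 8-byte aligned or the kernel returns -EFAULT.
 */
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/rds.h>

static ssize_t example_atomic_cswp(int rds_fd, struct sockaddr *dest, socklen_t dlen,
				   uint64_t cookie, uint64_t *old_val,
				   uint64_t remote_addr, uint64_t expect,
				   uint64_t newval)
{
	struct rds_atomic_args aargs = {
		.cookie	      = cookie,		/* <R_Key, offset> naming the remote MR */
		.local_addr   = (uint64_t)(unsigned long)old_val, /* 8-byte aligned u64 */
		.remote_addr  = remote_addr,
		.cswp.compare = expect,
		.cswp.swap    = newval,
		.flags	      = RDS_RDMA_NOTIFY_ME,
		.user_token   = 2,		/* echoed back in the completion notification */
	};
	char cbuf[CMSG_SPACE(sizeof(aargs))];
	struct msghdr msg = {
		.msg_name	= dest,
		.msg_namelen	= dlen,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type	 = RDS_CMSG_ATOMIC_CSWP;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(aargs));
	memcpy(CMSG_DATA(cmsg), &aargs, sizeof(aargs));

	return sendmsg(rds_fd, &msg, 0);
}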