v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 *  Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */

/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @timeout: Number of msecs to wait for connection management events
 * @privport: Whether a privileged port may be used
 * @port: The port to use
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *		See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	long timeout;
	bool privport;
	u16 port;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
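
/* Orientation note (a sketch, not part of the original source): sq_sem and
 * rq_sem are counting semaphores initialized to sq_depth and rq_depth in
 * alloc_rdma(). rdma_request() takes the matching semaphore before posting a
 * send/receive WR and the completion handlers release it, so the number of
 * outstanding WRs can never exceed the depth the QP was created with.
 */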

struct p9_rdma_req;

/**
 * struct p9_rdma_context - Keeps track of in-process WR
 *
 * @cqe: completion queue entry
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context {
	struct ib_cqe cqe;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall rc;
	};
};

/**
 * struct p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @privport: Whether a privileged port may be used
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	bool privport;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
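
/* Illustrative example (values hypothetical): an option string such as
 * "port=5640,sq=16,rq=32,privport" parsed by parse_opts() below yields
 * opts->port = 5640, opts->sq_depth = 16, opts->rq_depth = 32 and
 * opts->privport = true. Unrecognized tokens are silently skipped, and
 * rq_depth is finally clamped to at least sq_depth.
 */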

static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
{
	struct p9_trans_rdma *rdma = clnt->trans;

	if (rdma->port != P9_PORT)
		seq_printf(m, ",port=%u", rdma->port);
	if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
		seq_printf(m, ",sq=%u", rdma->sq_depth);
	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
		seq_printf(m, ",rq=%u", rdma->rq_depth);
	if (rdma->timeout != P9_RDMA_TIMEOUT)
		seq_printf(m, ",timeout=%lu", rdma->timeout);
	if (rdma->privport)
		seq_puts(m, ",privport");
	return 0;
}

/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = false;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = true;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}

static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}
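
/* A sketch of the normal connection sequence, as driven by the handler above
 * together with rdma_create_trans() below (each event completes
 * rdma->cm_done, which rdma_create_trans() waits on between steps):
 *
 *	rdma_resolve_addr()  -> RDMA_CM_EVENT_ADDR_RESOLVED
 *	rdma_resolve_route() -> RDMA_CM_EVENT_ROUTE_RESOLVED
 *	rdma_connect()       -> RDMA_CM_EVENT_ESTABLISHED
 *
 * Most other events on this path disconnect and mark the client
 * Disconnected.
 */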

static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	c->rc.size = wc->byte_len;
	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request */
	if (unlikely(req->rc.sdata)) {
		pr_err("Duplicate reply for request %d\n", tag);
		goto err_out;
	}

	req->rc.size = c->rc.size;
	req->rc.sdata = c->rc.sdata;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

 out:
	up(&rdma->rq_sem);
	kfree(c);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
			req, err, wc->status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}

static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);

	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc.size,
			    DMA_TO_DEVICE);
	up(&rdma->sq_sem);
	p9_req_put(c->req);
	kfree(c);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}

static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}

static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	c->cqe.done = recv_done;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, NULL);

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}

static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here, see if we are this `next request' and need to absorb an
	 * excess rc. If yes, then drop and free our own, and do not
	 * post_recv().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* Got one! */
			p9_fcall_fini(&req->rc);
			req->rc.sdata = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}
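
	/* Worked example of the mechanism above (requests hypothetical):
	 * request A posts its receive buffer, then fails in the send path,
	 * so excess_rc becomes 1 and A's buffer stays posted in the RQ.
	 * The next request B lands here, decrements excess_rc back to 0,
	 * frees its own reply buffer and relies on the receive already
	 * posted for A to absorb an incoming reply, skipping post_recv().
	 */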

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc.sdata = req->rc.sdata;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc.sdata = NULL;

dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc.sdata, c->req->tc.size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	c->cqe.done = send_done;

	sge.addr = c->busa;
	sge.length = c->req->tc.size;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto send_error;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	req->status = REQ_STATUS_SENT;
	err = ib_post_send(rdma->qp, &wr, NULL);
	if (err)
		goto send_error;

	/* Success */
	return 0;

 /* Handle errors that happened during or while preparing the send: */
 send_error:
	req->status = REQ_STATUS_ERROR;
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/* Ach.
	 * We did post_recv(), but not the send. We have one post_recv()
	 * in excess.
	 */
	atomic_inc(&rdma->excess_rc);
	return err;

 /* Handle errors that happened during or while preparing post_recv(): */
 recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else {
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	}
	return err;
}

static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->port = opts->port;
	rdma->privport = opts->privport;
	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}

static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}

/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;

	atomic_inc(&rdma->excess_rc);
	return 0;
}

static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}
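
/* Illustrative note: P9_DEF_MIN_RESVPORT and P9_DEF_MAX_RESVPORT come from
 * <net/9p/9p.h> and bound the classic reserved-port range below 1024; the
 * loop above simply walks that range downwards until rdma_bind_addr()
 * stops failing with -EADDRINUSE (or fails for some other reason).
 */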

/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;

	if (addr == NULL)
		return -EINVAL;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
				   opts.sq_depth + opts.rq_depth + 1,
				   IB_POLL_SOFTIRQ);
	if (IS_ERR(rdma->cq))
		goto error;

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}

static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
	.show_options = p9_rdma_show_options,
};
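
/* Illustrative usage (server address and mount point hypothetical): with
 * this module loaded, the transport is selected at mount time, e.g.
 *
 *	mount -t 9p -o trans=rdma,port=5640,privport 192.168.1.5 /mnt/9p
 *
 * rdma_create_trans() then receives "192.168.1.5" as @addr and the
 * remaining transport options as @args.
 */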

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");