/*
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path ucma_ctl_path[] = {
	{ .procname = "net" },
	{ .procname = "rdma_ucm" },
	{ }
};
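
/*
 * Together these tables expose the backlog cap as
 * /proc/sys/net/rdma_ucm/max_backlog, so an administrator can tune the
 * per-listen limit at runtime, e.g. (illustrative shell usage):
 *
 *	sysctl -w net.rdma_ucm.max_backlog=4096
 */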

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
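
/*
 * Lifetime note: each context starts with a reference of 1, held on behalf
 * of its creator. ucma_get_ctx()/ucma_put_ctx() bracket every command that
 * touches the context, and ucma_destroy_id() drops the initial reference
 * and then sleeps on ctx->comp until the last in-flight command completes.
 */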

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}
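
/*
 * The idr_pre_get()/idr_get_new() retry loop above is the pre-3.9 IDR
 * idiom: preallocate outside the lock, then retry on -EAGAIN if another
 * thread consumed the preallocated node. Later kernels collapse this into
 * a single idr_alloc() call, as the newer version of this file below does.
 */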

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
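
/*
 * For multicast joins, ucma_join_multicast() passed the ucma_multicast
 * pointer as the private context to rdma_join_multicast(), so it comes back
 * here in event->param.ud.private_data; that is how the uid/id of the
 * multicast registration (rather than of the connection) reach userspace.
 */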

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
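
/*
 * Returning non-zero from an rdma_cm event handler tells the CM to destroy
 * the id. That is why an allocation failure for a CONNECT_REQUEST returns 1
 * (the new child id is kernel-owned and must not leak), while failures on
 * an already-owned id return 0 and simply drop the event.
 */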

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
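
/*
 * A CONNECT_REQUEST is the one event that carries a brand-new cm_id, so a
 * fresh ucma_context is allocated here and the listener's backlog slot,
 * consumed in ucma_event_handler(), is returned before the event is handed
 * to userspace.
 */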

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
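
/*
 * Teardown order matters here: the id is removed from the idr first so no
 * new lookups can take a reference, then the initial reference is dropped
 * and we wait for outstanding commands to finish before freeing the
 * context.
 */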

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
						 ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}
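
/*
 * The requested backlog is clamped to the max_backlog sysctl; a value of 0
 * (or anything out of range) silently becomes the maximum rather than an
 * error.
 */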

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}
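
/*
 * After the supplied path is installed, a synthetic ROUTE_RESOLVED event is
 * fed through the normal event handler so userspace sees the same sequence
 * as if rdma_resolve_route() had completed.
 */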

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}
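
/*
 * Classic lock-ordering trick: when two files must be locked together, the
 * mutex at the lower pointer address is always taken first, so concurrent
 * migrations in opposite directions cannot deadlock.
 */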

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fds, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}
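
/*
 * Migration holds both file mutexes plus the global mut, so neither file
 * can queue a new event or look up the context while its list membership
 * and ctx->file pointer change together.
 */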

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	 = ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id
};
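
/*
 * Userspace drives this table by write()ing a struct rdma_ucm_cmd_hdr
 * followed by the command payload to the rdma_cm device node. A minimal
 * caller sketch, assuming the rdma_user_cm.h ABI definitions (fd setup and
 * error handling are illustrative only):
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = { .hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			   .in  = sizeof(msg.cmd),
 *			   .out = sizeof(struct rdma_ucm_create_id_resp) } };
 *	// fill msg.cmd.uid, msg.cmd.ps and msg.cmd.response, then:
 *	// write(fd, &msg, sizeof(msg));
 */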

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}
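
/*
 * poll() only ever reports readability: write() is the command path and
 * executes synchronously, so there is no queue-space condition for a
 * writable mask to signal.
 */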

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);
/*
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that the device is in the process of destroying this
	 * context's internal HW resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}
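
/*
 * In this version, teardown can also be driven by a device-removal event:
 * the close work runs on file->close_wq, drains references just as
 * ucma_destroy_id() does, and destroys the cm_id once it is idle, while
 * the ucma_context itself stays behind until userspace destroys it.
 */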

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only when the context owns this cm_id can the cm_id be queued for
	 * closing here. Otherwise the cm_id belongs to an inflight connect
	 * request whose event is still on this context's event list, waiting
	 * to be detached and reattached to a new context in ucma_get_event;
	 * that case is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}
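
/*
 * RDMA_PS_IB lets the caller choose the QP type directly instead of
 * deriving it from the port space; the other port spaces imply RC or UD.
 */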

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
				    ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it is guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
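
/*
 * The flush_workqueue() above synchronizes with ucma_close_id(): after the
 * flush, either the removal path already destroyed the cm_id (ctx->closing
 * is set) or it never will, so exactly one of the two paths calls
 * rdma_destroy_id().
 */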
615
616static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
617 int in_len, int out_len)
618{
619 struct rdma_ucm_bind_ip cmd;
620 struct ucma_context *ctx;
621 int ret;
622
623 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
624 return -EFAULT;
625
626 ctx = ucma_get_ctx(file, cmd.id);
627 if (IS_ERR(ctx))
628 return PTR_ERR(ctx);
629
630 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
631 ucma_put_ctx(ctx);
632 return ret;
633}
634
635static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
636 int in_len, int out_len)
637{
638 struct rdma_ucm_bind cmd;
639 struct sockaddr *addr;
640 struct ucma_context *ctx;
641 int ret;
642
643 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
644 return -EFAULT;
645
646 addr = (struct sockaddr *) &cmd.addr;
647 if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
648 return -EINVAL;
649
650 ctx = ucma_get_ctx(file, cmd.id);
651 if (IS_ERR(ctx))
652 return PTR_ERR(ctx);
653
654 ret = rdma_bind_addr(ctx->cm_id, addr);
655 ucma_put_ctx(ctx);
656 return ret;
657}
658
659static ssize_t ucma_resolve_ip(struct ucma_file *file,
660 const char __user *inbuf,
661 int in_len, int out_len)
662{
663 struct rdma_ucm_resolve_ip cmd;
664 struct ucma_context *ctx;
665 int ret;
666
667 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
668 return -EFAULT;
669
670 ctx = ucma_get_ctx(file, cmd.id);
671 if (IS_ERR(ctx))
672 return PTR_ERR(ctx);
673
674 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
675 (struct sockaddr *) &cmd.dst_addr,
676 cmd.timeout_ms);
677 ucma_put_ctx(ctx);
678 return ret;
679}
680
681static ssize_t ucma_resolve_addr(struct ucma_file *file,
682 const char __user *inbuf,
683 int in_len, int out_len)
684{
685 struct rdma_ucm_resolve_addr cmd;
686 struct sockaddr *src, *dst;
687 struct ucma_context *ctx;
688 int ret;
689
690 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
691 return -EFAULT;
692
693 src = (struct sockaddr *) &cmd.src_addr;
694 dst = (struct sockaddr *) &cmd.dst_addr;
695 if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
696 !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
697 return -EINVAL;
698
699 ctx = ucma_get_ctx(file, cmd.id);
700 if (IS_ERR(ctx))
701 return PTR_ERR(ctx);
702
703 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
704 ucma_put_ctx(ctx);
705 return ret;
706}
707
708static ssize_t ucma_resolve_route(struct ucma_file *file,
709 const char __user *inbuf,
710 int in_len, int out_len)
711{
712 struct rdma_ucm_resolve_route cmd;
713 struct ucma_context *ctx;
714 int ret;
715
716 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
717 return -EFAULT;
718
719 ctx = ucma_get_ctx(file, cmd.id);
720 if (IS_ERR(ctx))
721 return PTR_ERR(ctx);
722
723 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
724 ucma_put_ctx(ctx);
725 return ret;
726}
727
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

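/*
 * RoCE (IBoE) variant of the above: when no path records exist, the GIDs
 * are derived from the IP addresses via rdma_ip2gid() and the pkey is the
 * fixed default 0xffff.
 */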
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

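/* iWarp exposes no path records or pkey; report only the device GIDs. */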
static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

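/*
 * Report the bound addresses and, if a device has already been associated
 * with the cm_id, the node GUID, port number and transport-specific route
 * information; before resolution, only the addresses are filled in.
 */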
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

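/* Fill in the device-derived fields shared by the addr and gid queries. */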
static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

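/*
 * Report the source and destination as AF_IB sockaddrs. If an endpoint is
 * not already AF_IB, a sockaddr_ib is synthesized from the resolved device
 * address (GID, pkey and service ID).
 */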
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

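/*
 * Translate userspace connection parameters into a kernel rdma_conn_param.
 * The qkey is meaningful only for AF_IB addresses and is zeroed otherwise.
 */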
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

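/*
 * Start listening on the bound address. The requested backlog is clamped
 * to the max_backlog sysctl (net.rdma_ucm.max_backlog); zero or
 * out-of-range values fall back to that maximum.
 */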
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

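/*
 * Handle RDMA_OPTION_ID level options. Each option validates the exact
 * optlen it expects before dereferencing optval.
 */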
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

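/*
 * Install a userspace-supplied IB path. Only the first entry flagged as a
 * bidirectional primary GMP path is used; accepting it synthesizes a
 * ROUTE_RESOLVED event so the caller observes the same sequence as after
 * rdma_resolve_route().
 */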
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* optlen comes straight from userspace; bound it before memdup_user()
	 * so an arbitrarily large value cannot drive an oversized kmalloc().
	 */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

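/*
 * Common multicast-join path. Allocates a ucma_multicast tracking structure,
 * joins the group, and hands the new multicast id back to userspace; the
 * error paths unwind each step in turn, including any join events that may
 * already have fired.
 */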
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
	join_cmd.reserved = 0;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	return ucma_process_join(file, &cmd, out_len);
}

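/*
 * Leave a multicast group by multicast id. The lookup, ownership check and
 * idr removal are done atomically under the global mutex, so a concurrent
 * second leave cannot find the same entry.
 */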
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

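/*
 * Move a cm_id (and its queued events) from the ucma file it was created on
 * to the file backing this request, e.g. after a fork. Both files' mutexes
 * are taken in address order via ucma_lock_files() to avoid deadlock.
 */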
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

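/*
 * Dispatch table for commands written to the device, indexed by hdr.cmd.
 * A NULL slot (RDMA_USER_CM_CMD_GET_OPTION) makes that command fail with
 * -ENOSYS in ucma_write().
 */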
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

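/*
 * Each write(2) carries one struct rdma_ucm_cmd_hdr followed immediately by
 * that command's payload. As a minimal illustration (a sketch only; real
 * callers go through librdmacm, and the payload layouts are defined in
 * <rdma/rdma_user_cm.h>), creating an id would look roughly like:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} req;
 *
 *	req.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	req.hdr.in  = sizeof(req.cmd);
 *	req.hdr.out = sizeof(struct rdma_ucm_create_id_resp);
 *	... fill req.cmd ...
 *	write(fd, &req, sizeof(req));
 *
 * On success, write() returns the full length that was submitted.
 */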
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = create_singlethread_workqueue("ucma_close_id");
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue has been flushed, we are safe from any in-flight
		 * handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);