v3.1 (drivers/infiniband/core/ucma.c)
   1/*
   2 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *	copyright notice, this list of conditions and the following
  16 *	disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *	copyright notice, this list of conditions and the following
  20 *	disclaimer in the documentation and/or other materials
  21 *	provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/completion.h>
  34#include <linux/file.h>
  35#include <linux/mutex.h>
  36#include <linux/poll.h>
  37#include <linux/sched.h>
  38#include <linux/idr.h>
  39#include <linux/in.h>
  40#include <linux/in6.h>
  41#include <linux/miscdevice.h>
  42#include <linux/slab.h>
  43#include <linux/sysctl.h>
  44
  45#include <rdma/rdma_user_cm.h>
  46#include <rdma/ib_marshall.h>
  47#include <rdma/rdma_cm.h>
  48#include <rdma/rdma_cm_ib.h>
  49
  50MODULE_AUTHOR("Sean Hefty");
  51MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
  52MODULE_LICENSE("Dual BSD/GPL");
  53
  54static unsigned int max_backlog = 1024;
  55
  56static struct ctl_table_header *ucma_ctl_table_hdr;
  57static ctl_table ucma_ctl_table[] = {
  58	{
  59		.procname	= "max_backlog",
  60		.data		= &max_backlog,
  61		.maxlen		= sizeof max_backlog,
  62		.mode		= 0644,
  63		.proc_handler	= proc_dointvec,
  64	},
  65	{ }
  66};
  67
  68static struct ctl_path ucma_ctl_path[] = {
  69	{ .procname = "net" },
  70	{ .procname = "rdma_ucm" },
  71	{ }
  72};
  73
  74struct ucma_file {
  75	struct mutex		mut;
  76	struct file		*filp;
  77	struct list_head	ctx_list;
  78	struct list_head	event_list;
  79	wait_queue_head_t	poll_wait;
  80};
  81
  82struct ucma_context {
  83	int			id;
  84	struct completion	comp;
  85	atomic_t		ref;
  86	int			events_reported;
  87	int			backlog;
  88
  89	struct ucma_file	*file;
  90	struct rdma_cm_id	*cm_id;
  91	u64			uid;
  92
  93	struct list_head	list;
  94	struct list_head	mc_list;
  95};
  96
  97struct ucma_multicast {
  98	struct ucma_context	*ctx;
  99	int			id;
 100	int			events_reported;
 101
 102	u64			uid;
 103	struct list_head	list;
 104	struct sockaddr_storage	addr;
 105};
 106
 107struct ucma_event {
 108	struct ucma_context	*ctx;
 109	struct ucma_multicast	*mc;
 110	struct list_head	list;
 111	struct rdma_cm_id	*cm_id;
 112	struct rdma_ucm_event_resp resp;
 113};
 114
 115static DEFINE_MUTEX(mut);
 116static DEFINE_IDR(ctx_idr);
 117static DEFINE_IDR(multicast_idr);
 118
 119static inline struct ucma_context *_ucma_find_context(int id,
 120						      struct ucma_file *file)
 121{
 122	struct ucma_context *ctx;
 123
 124	ctx = idr_find(&ctx_idr, id);
 125	if (!ctx)
 126		ctx = ERR_PTR(-ENOENT);
 127	else if (ctx->file != file)
 128		ctx = ERR_PTR(-EINVAL);
 129	return ctx;
 130}
 131
 132static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
 133{
 134	struct ucma_context *ctx;
 135
 136	mutex_lock(&mut);
 137	ctx = _ucma_find_context(id, file);
 138	if (!IS_ERR(ctx))
 139		atomic_inc(&ctx->ref);
 140	mutex_unlock(&mut);
 141	return ctx;
 142}
 143
 144static void ucma_put_ctx(struct ucma_context *ctx)
 145{
 146	if (atomic_dec_and_test(&ctx->ref))
 147		complete(&ctx->comp);
 148}
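/*
 * Explanatory note (not part of the original file): every lookup via
 * ucma_get_ctx() takes a reference, and ucma_put_ctx() completes
 * ctx->comp when the last one is dropped.  ucma_destroy_id() relies on
 * this pairing: it drops its own reference and then blocks in
 * wait_for_completion(&ctx->comp) until all concurrent users are done
 * before freeing the context.
 */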
 149
 150static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 151{
 152	struct ucma_context *ctx;
 153	int ret;
 154
 155	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 156	if (!ctx)
 157		return NULL;
 158
 159	atomic_set(&ctx->ref, 1);
 160	init_completion(&ctx->comp);
 161	INIT_LIST_HEAD(&ctx->mc_list);
 162	ctx->file = file;
 163
 164	do {
 165		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
 166		if (!ret)
 167			goto error;
 168
 169		mutex_lock(&mut);
 170		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
 171		mutex_unlock(&mut);
 172	} while (ret == -EAGAIN);
 173
 174	if (ret)
 175		goto error;
 176
 177	list_add_tail(&ctx->list, &file->ctx_list);
 178	return ctx;
 179
 180error:
 181	kfree(ctx);
 182	return NULL;
 183}
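/*
 * Explanatory note: the idr_pre_get()/idr_get_new() loop above is the
 * pre-3.9 IDR idiom.  Memory is preallocated outside the mutex, the id
 * is assigned under it, and -EAGAIN means another caller consumed the
 * preallocation, so the loop retries.  The v3.15 listing further down
 * replaces this with a single idr_alloc() call.
 */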
 184
 185static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
 186{
 187	struct ucma_multicast *mc;
 188	int ret;
 189
 190	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 191	if (!mc)
 192		return NULL;
 193
 194	do {
 195		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
 196		if (!ret)
 197			goto error;
 198
 199		mutex_lock(&mut);
 200		ret = idr_get_new(&multicast_idr, mc, &mc->id);
 201		mutex_unlock(&mut);
 202	} while (ret == -EAGAIN);
 203
 204	if (ret)
 205		goto error;
 206
 207	mc->ctx = ctx;
 208	list_add_tail(&mc->list, &ctx->mc_list);
 209	return mc;
 210
 211error:
 212	kfree(mc);
 213	return NULL;
 214}
 215
 216static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
 217				 struct rdma_conn_param *src)
 218{
 219	if (src->private_data_len)
 220		memcpy(dst->private_data, src->private_data,
 221		       src->private_data_len);
 222	dst->private_data_len = src->private_data_len;
  223	dst->responder_resources = src->responder_resources;
 224	dst->initiator_depth = src->initiator_depth;
 225	dst->flow_control = src->flow_control;
 226	dst->retry_count = src->retry_count;
 227	dst->rnr_retry_count = src->rnr_retry_count;
 228	dst->srq = src->srq;
 229	dst->qp_num = src->qp_num;
 230}
 231
 232static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
 233			       struct rdma_ud_param *src)
 234{
 235	if (src->private_data_len)
 236		memcpy(dst->private_data, src->private_data,
 237		       src->private_data_len);
 238	dst->private_data_len = src->private_data_len;
 239	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
 240	dst->qp_num = src->qp_num;
 241	dst->qkey = src->qkey;
 242}
 243
 244static void ucma_set_event_context(struct ucma_context *ctx,
 245				   struct rdma_cm_event *event,
 246				   struct ucma_event *uevent)
 247{
 248	uevent->ctx = ctx;
 249	switch (event->event) {
 250	case RDMA_CM_EVENT_MULTICAST_JOIN:
 251	case RDMA_CM_EVENT_MULTICAST_ERROR:
 252		uevent->mc = (struct ucma_multicast *)
 253			     event->param.ud.private_data;
 254		uevent->resp.uid = uevent->mc->uid;
 255		uevent->resp.id = uevent->mc->id;
 256		break;
 257	default:
 258		uevent->resp.uid = ctx->uid;
 259		uevent->resp.id = ctx->id;
 260		break;
 261	}
 262}
 263
 264static int ucma_event_handler(struct rdma_cm_id *cm_id,
 265			      struct rdma_cm_event *event)
 266{
 267	struct ucma_event *uevent;
 268	struct ucma_context *ctx = cm_id->context;
 269	int ret = 0;
 270
 271	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
 272	if (!uevent)
 273		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
 274
 275	uevent->cm_id = cm_id;
 276	ucma_set_event_context(ctx, event, uevent);
 277	uevent->resp.event = event->event;
 278	uevent->resp.status = event->status;
 279	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
 280		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
 281	else
 282		ucma_copy_conn_event(&uevent->resp.param.conn,
 283				     &event->param.conn);
 284
 285	mutex_lock(&ctx->file->mut);
 286	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 287		if (!ctx->backlog) {
 288			ret = -ENOMEM;
 289			kfree(uevent);
 290			goto out;
 291		}
 292		ctx->backlog--;
 293	} else if (!ctx->uid) {
 294		/*
 295		 * We ignore events for new connections until userspace has set
 296		 * their context.  This can only happen if an error occurs on a
 297		 * new connection before the user accepts it.  This is okay,
 298		 * since the accept will just fail later.
 299		 */
 300		kfree(uevent);
 301		goto out;
 302	}
 303
 304	list_add_tail(&uevent->list, &ctx->file->event_list);
 305	wake_up_interruptible(&ctx->file->poll_wait);
 306out:
 307	mutex_unlock(&ctx->file->mut);
 308	return ret;
 309}
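/*
 * Explanatory note: a non-zero return for RDMA_CM_EVENT_CONNECT_REQUEST
 * (allocation failure or exhausted backlog) tells the rdma_cm core to
 * tear down the newly created id, since no ucma_context will ever own
 * it.  ctx->backlog, set in ucma_listen(), is decremented here and
 * credited back in ucma_get_event() once userspace consumes the
 * connect request.
 */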
 310
 311static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
 312			      int in_len, int out_len)
 313{
 314	struct ucma_context *ctx;
 315	struct rdma_ucm_get_event cmd;
 316	struct ucma_event *uevent;
 317	int ret = 0;
 318	DEFINE_WAIT(wait);
 319
 320	if (out_len < sizeof uevent->resp)
 321		return -ENOSPC;
 322
 323	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 324		return -EFAULT;
 325
 326	mutex_lock(&file->mut);
 327	while (list_empty(&file->event_list)) {
 328		mutex_unlock(&file->mut);
 329
 330		if (file->filp->f_flags & O_NONBLOCK)
 331			return -EAGAIN;
 332
 333		if (wait_event_interruptible(file->poll_wait,
 334					     !list_empty(&file->event_list)))
 335			return -ERESTARTSYS;
 336
 337		mutex_lock(&file->mut);
 338	}
 339
 340	uevent = list_entry(file->event_list.next, struct ucma_event, list);
 341
 342	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 343		ctx = ucma_alloc_ctx(file);
 344		if (!ctx) {
 345			ret = -ENOMEM;
 346			goto done;
 347		}
 348		uevent->ctx->backlog++;
 349		ctx->cm_id = uevent->cm_id;
 350		ctx->cm_id->context = ctx;
 351		uevent->resp.id = ctx->id;
 352	}
 353
 354	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 355			 &uevent->resp, sizeof uevent->resp)) {
 356		ret = -EFAULT;
 357		goto done;
 358	}
 359
 360	list_del(&uevent->list);
 361	uevent->ctx->events_reported++;
 362	if (uevent->mc)
 363		uevent->mc->events_reported++;
 364	kfree(uevent);
 365done:
 366	mutex_unlock(&file->mut);
 367	return ret;
 368}
 369
 370static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
 371{
 372	switch (cmd->ps) {
 373	case RDMA_PS_TCP:
 374		*qp_type = IB_QPT_RC;
 375		return 0;
 376	case RDMA_PS_UDP:
 377	case RDMA_PS_IPOIB:
 378		*qp_type = IB_QPT_UD;
 379		return 0;
 380	default:
 381		return -EINVAL;
 382	}
 383}
 384
 385static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 386			      int in_len, int out_len)
 387{
 388	struct rdma_ucm_create_id cmd;
 389	struct rdma_ucm_create_id_resp resp;
 390	struct ucma_context *ctx;
 391	enum ib_qp_type qp_type;
 392	int ret;
 393
 394	if (out_len < sizeof(resp))
 395		return -ENOSPC;
 396
 397	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 398		return -EFAULT;
 399
 400	ret = ucma_get_qp_type(&cmd, &qp_type);
 401	if (ret)
 402		return ret;
 403
 404	mutex_lock(&file->mut);
 405	ctx = ucma_alloc_ctx(file);
 406	mutex_unlock(&file->mut);
 407	if (!ctx)
 408		return -ENOMEM;
 409
 410	ctx->uid = cmd.uid;
 411	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
 412	if (IS_ERR(ctx->cm_id)) {
 413		ret = PTR_ERR(ctx->cm_id);
 414		goto err1;
 415	}
 416
 417	resp.id = ctx->id;
 418	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 419			 &resp, sizeof(resp))) {
 420		ret = -EFAULT;
 421		goto err2;
 422	}
 423	return 0;
 424
 425err2:
 426	rdma_destroy_id(ctx->cm_id);
 427err1:
 428	mutex_lock(&mut);
 429	idr_remove(&ctx_idr, ctx->id);
 430	mutex_unlock(&mut);
 431	kfree(ctx);
 432	return ret;
 433}
 434
 435static void ucma_cleanup_multicast(struct ucma_context *ctx)
 436{
 437	struct ucma_multicast *mc, *tmp;
 438
 439	mutex_lock(&mut);
 440	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
 441		list_del(&mc->list);
 442		idr_remove(&multicast_idr, mc->id);
 443		kfree(mc);
 444	}
 445	mutex_unlock(&mut);
 446}
 447
 448static void ucma_cleanup_events(struct ucma_context *ctx)
 449{
 450	struct ucma_event *uevent, *tmp;
 451
 452	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
 453		if (uevent->ctx != ctx)
 454			continue;
 455
 456		list_del(&uevent->list);
 457
 458		/* clear incoming connections. */
 459		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
 460			rdma_destroy_id(uevent->cm_id);
 461
 462		kfree(uevent);
 463	}
 464}
 465
 466static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 467{
 468	struct ucma_event *uevent, *tmp;
 469
 470	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
 471		if (uevent->mc != mc)
 472			continue;
 473
 474		list_del(&uevent->list);
 475		kfree(uevent);
 476	}
 477}
 478
 479static int ucma_free_ctx(struct ucma_context *ctx)
 480{
 481	int events_reported;
 482
 483	/* No new events will be generated after destroying the id. */
 484	rdma_destroy_id(ctx->cm_id);
 485
 486	ucma_cleanup_multicast(ctx);
 487
 488	/* Cleanup events not yet reported to the user. */
 489	mutex_lock(&ctx->file->mut);
 490	ucma_cleanup_events(ctx);
 491	list_del(&ctx->list);
 492	mutex_unlock(&ctx->file->mut);
 493
 494	events_reported = ctx->events_reported;
 495	kfree(ctx);
 496	return events_reported;
 497}
 498
 499static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
 500			       int in_len, int out_len)
 501{
 502	struct rdma_ucm_destroy_id cmd;
 503	struct rdma_ucm_destroy_id_resp resp;
 504	struct ucma_context *ctx;
 505	int ret = 0;
 506
 507	if (out_len < sizeof(resp))
 508		return -ENOSPC;
 509
 510	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 511		return -EFAULT;
 512
 513	mutex_lock(&mut);
 514	ctx = _ucma_find_context(cmd.id, file);
 515	if (!IS_ERR(ctx))
 516		idr_remove(&ctx_idr, ctx->id);
 517	mutex_unlock(&mut);
 518
 519	if (IS_ERR(ctx))
 520		return PTR_ERR(ctx);
 521
 522	ucma_put_ctx(ctx);
 523	wait_for_completion(&ctx->comp);
 524	resp.events_reported = ucma_free_ctx(ctx);
 525
 526	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 527			 &resp, sizeof(resp)))
 528		ret = -EFAULT;
 529
 530	return ret;
 531}
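/*
 * Explanatory note on the teardown ordering above: the id is removed
 * from ctx_idr first so no new ucma_get_ctx() lookup can find it, then
 * the caller's reference is dropped and wait_for_completion() blocks
 * until all outstanding references are released.  Only then does
 * ucma_free_ctx() call rdma_destroy_id() and free the memory.
 */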
 532
 533static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
 534			      int in_len, int out_len)
 535{
 536	struct rdma_ucm_bind_addr cmd;
 537	struct ucma_context *ctx;
 538	int ret;
 539
 540	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 541		return -EFAULT;
 542
 543	ctx = ucma_get_ctx(file, cmd.id);
 544	if (IS_ERR(ctx))
 545		return PTR_ERR(ctx);
 546
 547	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
 548	ucma_put_ctx(ctx);
 549	return ret;
 550}
 551
 552static ssize_t ucma_resolve_addr(struct ucma_file *file,
 553				 const char __user *inbuf,
 554				 int in_len, int out_len)
 555{
 556	struct rdma_ucm_resolve_addr cmd;
 557	struct ucma_context *ctx;
 558	int ret;
 559
 560	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 561		return -EFAULT;
 562
 563	ctx = ucma_get_ctx(file, cmd.id);
 564	if (IS_ERR(ctx))
 565		return PTR_ERR(ctx);
 566
 567	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 568				(struct sockaddr *) &cmd.dst_addr,
 569				cmd.timeout_ms);
 570	ucma_put_ctx(ctx);
 571	return ret;
 572}
 573
 574static ssize_t ucma_resolve_route(struct ucma_file *file,
 575				  const char __user *inbuf,
 576				  int in_len, int out_len)
 577{
 578	struct rdma_ucm_resolve_route cmd;
 579	struct ucma_context *ctx;
 580	int ret;
 581
 582	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 583		return -EFAULT;
 584
 585	ctx = ucma_get_ctx(file, cmd.id);
 586	if (IS_ERR(ctx))
 587		return PTR_ERR(ctx);
 588
 589	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
 590	ucma_put_ctx(ctx);
 591	return ret;
 592}
 593
 594static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 595			       struct rdma_route *route)
 596{
 597	struct rdma_dev_addr *dev_addr;
 598
 599	resp->num_paths = route->num_paths;
 600	switch (route->num_paths) {
 601	case 0:
 602		dev_addr = &route->addr.dev_addr;
 603		rdma_addr_get_dgid(dev_addr,
 604				   (union ib_gid *) &resp->ib_route[0].dgid);
 605		rdma_addr_get_sgid(dev_addr,
 606				   (union ib_gid *) &resp->ib_route[0].sgid);
 607		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
 608		break;
 609	case 2:
 610		ib_copy_path_rec_to_user(&resp->ib_route[1],
 611					 &route->path_rec[1]);
 612		/* fall through */
 613	case 1:
 614		ib_copy_path_rec_to_user(&resp->ib_route[0],
 615					 &route->path_rec[0]);
 616		break;
 617	default:
 618		break;
 619	}
 620}
 621
 622static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
 623				 struct rdma_route *route)
 624{
 625	struct rdma_dev_addr *dev_addr;
 626	struct net_device *dev;
 627	u16 vid = 0;
 628
 629	resp->num_paths = route->num_paths;
 630	switch (route->num_paths) {
 631	case 0:
 632		dev_addr = &route->addr.dev_addr;
 633		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
  634		if (dev) {
  635			vid = rdma_vlan_dev_vlan_id(dev);
  636			dev_put(dev);
  637		}
 638
 639		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
 640				    dev_addr->dst_dev_addr, vid);
 641		iboe_addr_get_sgid(dev_addr,
 642				   (union ib_gid *) &resp->ib_route[0].sgid);
 643		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
 644		break;
 645	case 2:
 646		ib_copy_path_rec_to_user(&resp->ib_route[1],
 647					 &route->path_rec[1]);
 648		/* fall through */
 649	case 1:
 650		ib_copy_path_rec_to_user(&resp->ib_route[0],
 651					 &route->path_rec[0]);
 652		break;
 653	default:
 654		break;
 655	}
 656}
 657
 658static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
 659			       struct rdma_route *route)
 660{
 661	struct rdma_dev_addr *dev_addr;
 662
 663	dev_addr = &route->addr.dev_addr;
 664	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
 665	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
 666}
 667
 668static ssize_t ucma_query_route(struct ucma_file *file,
 669				const char __user *inbuf,
 670				int in_len, int out_len)
 671{
 672	struct rdma_ucm_query_route cmd;
 673	struct rdma_ucm_query_route_resp resp;
 674	struct ucma_context *ctx;
 675	struct sockaddr *addr;
 676	int ret = 0;
 677
 678	if (out_len < sizeof(resp))
 679		return -ENOSPC;
 680
 681	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 682		return -EFAULT;
 683
 684	ctx = ucma_get_ctx(file, cmd.id);
 685	if (IS_ERR(ctx))
 686		return PTR_ERR(ctx);
 687
 688	memset(&resp, 0, sizeof resp);
 689	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 690	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 691				     sizeof(struct sockaddr_in) :
 692				     sizeof(struct sockaddr_in6));
 693	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 694	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 695				     sizeof(struct sockaddr_in) :
 696				     sizeof(struct sockaddr_in6));
 697	if (!ctx->cm_id->device)
 698		goto out;
 699
 700	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 701	resp.port_num = ctx->cm_id->port_num;
 702	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
 703	case RDMA_TRANSPORT_IB:
 704		switch (rdma_port_get_link_layer(ctx->cm_id->device,
 705			ctx->cm_id->port_num)) {
 706		case IB_LINK_LAYER_INFINIBAND:
 707			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
 708			break;
 709		case IB_LINK_LAYER_ETHERNET:
 710			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
 711			break;
 712		default:
 713			break;
 714		}
 715		break;
 716	case RDMA_TRANSPORT_IWARP:
 717		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
 718		break;
 719	default:
 720		break;
 721	}
 722
 723out:
 724	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 725			 &resp, sizeof(resp)))
 726		ret = -EFAULT;
 727
 728	ucma_put_ctx(ctx);
 729	return ret;
 730}
 731
 732static void ucma_copy_conn_param(struct rdma_conn_param *dst,
 733				 struct rdma_ucm_conn_param *src)
 734{
 735	dst->private_data = src->private_data;
 736	dst->private_data_len = src->private_data_len;
  737	dst->responder_resources = src->responder_resources;
 738	dst->initiator_depth = src->initiator_depth;
 739	dst->flow_control = src->flow_control;
 740	dst->retry_count = src->retry_count;
 741	dst->rnr_retry_count = src->rnr_retry_count;
 742	dst->srq = src->srq;
 743	dst->qp_num = src->qp_num;
 744}
 745
 746static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 747			    int in_len, int out_len)
 748{
 749	struct rdma_ucm_connect cmd;
 750	struct rdma_conn_param conn_param;
 751	struct ucma_context *ctx;
 752	int ret;
 753
 754	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 755		return -EFAULT;
 756
 757	if (!cmd.conn_param.valid)
 758		return -EINVAL;
 759
 760	ctx = ucma_get_ctx(file, cmd.id);
 761	if (IS_ERR(ctx))
 762		return PTR_ERR(ctx);
 763
 764	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
 765	ret = rdma_connect(ctx->cm_id, &conn_param);
 766	ucma_put_ctx(ctx);
 767	return ret;
 768}
 769
 770static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
 771			   int in_len, int out_len)
 772{
 773	struct rdma_ucm_listen cmd;
 774	struct ucma_context *ctx;
 775	int ret;
 776
 777	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 778		return -EFAULT;
 779
 780	ctx = ucma_get_ctx(file, cmd.id);
 781	if (IS_ERR(ctx))
 782		return PTR_ERR(ctx);
 783
 784	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
 785		       cmd.backlog : max_backlog;
 786	ret = rdma_listen(ctx->cm_id, ctx->backlog);
 787	ucma_put_ctx(ctx);
 788	return ret;
 789}
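/*
 * Explanatory note: a zero or out-of-range backlog from userspace falls
 * back to max_backlog, which is tunable at runtime through the
 * net.rdma_ucm.max_backlog sysctl registered in ucma_init().
 */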
 790
 791static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 792			   int in_len, int out_len)
 793{
 794	struct rdma_ucm_accept cmd;
 795	struct rdma_conn_param conn_param;
 796	struct ucma_context *ctx;
 797	int ret;
 798
 799	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 800		return -EFAULT;
 801
 802	ctx = ucma_get_ctx(file, cmd.id);
 803	if (IS_ERR(ctx))
 804		return PTR_ERR(ctx);
 805
 806	if (cmd.conn_param.valid) {
 807		ctx->uid = cmd.uid;
 808		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
 809		ret = rdma_accept(ctx->cm_id, &conn_param);
 810	} else
 811		ret = rdma_accept(ctx->cm_id, NULL);
 812
 813	ucma_put_ctx(ctx);
 814	return ret;
 815}
 816
 817static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
 818			   int in_len, int out_len)
 819{
 820	struct rdma_ucm_reject cmd;
 821	struct ucma_context *ctx;
 822	int ret;
 823
 824	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 825		return -EFAULT;
 826
 827	ctx = ucma_get_ctx(file, cmd.id);
 828	if (IS_ERR(ctx))
 829		return PTR_ERR(ctx);
 830
 831	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
 832	ucma_put_ctx(ctx);
 833	return ret;
 834}
 835
 836static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
 837			       int in_len, int out_len)
 838{
 839	struct rdma_ucm_disconnect cmd;
 840	struct ucma_context *ctx;
 841	int ret;
 842
 843	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 844		return -EFAULT;
 845
 846	ctx = ucma_get_ctx(file, cmd.id);
 847	if (IS_ERR(ctx))
 848		return PTR_ERR(ctx);
 849
 850	ret = rdma_disconnect(ctx->cm_id);
 851	ucma_put_ctx(ctx);
 852	return ret;
 853}
 854
 855static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 856				 const char __user *inbuf,
 857				 int in_len, int out_len)
 858{
 859	struct rdma_ucm_init_qp_attr cmd;
 860	struct ib_uverbs_qp_attr resp;
 861	struct ucma_context *ctx;
 862	struct ib_qp_attr qp_attr;
 863	int ret;
 864
 865	if (out_len < sizeof(resp))
 866		return -ENOSPC;
 867
 868	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 869		return -EFAULT;
 870
 871	ctx = ucma_get_ctx(file, cmd.id);
 872	if (IS_ERR(ctx))
 873		return PTR_ERR(ctx);
 874
 875	resp.qp_attr_mask = 0;
 876	memset(&qp_attr, 0, sizeof qp_attr);
 877	qp_attr.qp_state = cmd.qp_state;
 878	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
 879	if (ret)
 880		goto out;
 881
 882	ib_copy_qp_attr_to_user(&resp, &qp_attr);
 883	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 884			 &resp, sizeof(resp)))
 885		ret = -EFAULT;
 886
 887out:
 888	ucma_put_ctx(ctx);
 889	return ret;
 890}
 891
 892static int ucma_set_option_id(struct ucma_context *ctx, int optname,
 893			      void *optval, size_t optlen)
 894{
 895	int ret = 0;
 896
 897	switch (optname) {
 898	case RDMA_OPTION_ID_TOS:
 899		if (optlen != sizeof(u8)) {
 900			ret = -EINVAL;
 901			break;
 902		}
 903		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
 904		break;
 905	case RDMA_OPTION_ID_REUSEADDR:
 906		if (optlen != sizeof(int)) {
 907			ret = -EINVAL;
 908			break;
 909		}
 910		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
 911		break;
 912	default:
 913		ret = -ENOSYS;
 914	}
 915
 916	return ret;
 917}
 918
 919static int ucma_set_ib_path(struct ucma_context *ctx,
 920			    struct ib_path_rec_data *path_data, size_t optlen)
 921{
 922	struct ib_sa_path_rec sa_path;
 923	struct rdma_cm_event event;
 924	int ret;
 925
 926	if (optlen % sizeof(*path_data))
 927		return -EINVAL;
 928
 929	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
 930		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
 931					 IB_PATH_BIDIRECTIONAL))
 932			break;
 933	}
 934
 935	if (!optlen)
 936		return -EINVAL;
 937
 938	ib_sa_unpack_path(path_data->path_rec, &sa_path);
 939	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
 940	if (ret)
 941		return ret;
 942
 943	memset(&event, 0, sizeof event);
 944	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 945	return ucma_event_handler(ctx->cm_id, &event);
 946}
 947
 948static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
 949			      void *optval, size_t optlen)
 950{
 951	int ret;
 952
 953	switch (optname) {
 954	case RDMA_OPTION_IB_PATH:
 955		ret = ucma_set_ib_path(ctx, optval, optlen);
 956		break;
 957	default:
 958		ret = -ENOSYS;
 959	}
 960
 961	return ret;
 962}
 963
 964static int ucma_set_option_level(struct ucma_context *ctx, int level,
 965				 int optname, void *optval, size_t optlen)
 966{
 967	int ret;
 968
 969	switch (level) {
 970	case RDMA_OPTION_ID:
 971		ret = ucma_set_option_id(ctx, optname, optval, optlen);
 972		break;
 973	case RDMA_OPTION_IB:
 974		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
 975		break;
 976	default:
 977		ret = -ENOSYS;
 978	}
 979
 980	return ret;
 981}
 982
 983static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
 984			       int in_len, int out_len)
 985{
 986	struct rdma_ucm_set_option cmd;
 987	struct ucma_context *ctx;
 988	void *optval;
 989	int ret;
 990
 991	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 992		return -EFAULT;
 993
 994	ctx = ucma_get_ctx(file, cmd.id);
 995	if (IS_ERR(ctx))
 996		return PTR_ERR(ctx);
 997
 998	optval = kmalloc(cmd.optlen, GFP_KERNEL);
 999	if (!optval) {
1000		ret = -ENOMEM;
1001		goto out1;
1002	}
1003
1004	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
1005			   cmd.optlen)) {
1006		ret = -EFAULT;
1007		goto out2;
1008	}
1009
1010	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
1011				    cmd.optlen);
1012out2:
1013	kfree(optval);
1014out1:
1015	ucma_put_ctx(ctx);
1016	return ret;
1017}
1018
1019static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1020			   int in_len, int out_len)
1021{
1022	struct rdma_ucm_notify cmd;
1023	struct ucma_context *ctx;
1024	int ret;
1025
1026	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1027		return -EFAULT;
1028
1029	ctx = ucma_get_ctx(file, cmd.id);
1030	if (IS_ERR(ctx))
1031		return PTR_ERR(ctx);
1032
1033	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
1034	ucma_put_ctx(ctx);
1035	return ret;
1036}
1037
1038static ssize_t ucma_join_multicast(struct ucma_file *file,
1039				   const char __user *inbuf,
1040				   int in_len, int out_len)
1041{
1042	struct rdma_ucm_join_mcast cmd;
1043	struct rdma_ucm_create_id_resp resp;
1044	struct ucma_context *ctx;
1045	struct ucma_multicast *mc;
1046	int ret;
1047
1048	if (out_len < sizeof(resp))
1049		return -ENOSPC;
1050
1051	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1052		return -EFAULT;
1053
1054	ctx = ucma_get_ctx(file, cmd.id);
1055	if (IS_ERR(ctx))
1056		return PTR_ERR(ctx);
1057
1058	mutex_lock(&file->mut);
1059	mc = ucma_alloc_multicast(ctx);
1060	if (!mc) {
1061		ret = -ENOMEM;
1062		goto err1;
1063	}
1064
1065	mc->uid = cmd.uid;
1066	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
1067	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
1068	if (ret)
1069		goto err2;
1070
1071	resp.id = mc->id;
1072	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1073			 &resp, sizeof(resp))) {
1074		ret = -EFAULT;
1075		goto err3;
1076	}
1077
1078	mutex_unlock(&file->mut);
1079	ucma_put_ctx(ctx);
1080	return 0;
1081
1082err3:
1083	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
1084	ucma_cleanup_mc_events(mc);
1085err2:
1086	mutex_lock(&mut);
1087	idr_remove(&multicast_idr, mc->id);
1088	mutex_unlock(&mut);
1089	list_del(&mc->list);
1090	kfree(mc);
1091err1:
1092	mutex_unlock(&file->mut);
1093	ucma_put_ctx(ctx);
1094	return ret;
1095}
1096
1097static ssize_t ucma_leave_multicast(struct ucma_file *file,
1098				    const char __user *inbuf,
1099				    int in_len, int out_len)
1100{
1101	struct rdma_ucm_destroy_id cmd;
1102	struct rdma_ucm_destroy_id_resp resp;
1103	struct ucma_multicast *mc;
1104	int ret = 0;
1105
1106	if (out_len < sizeof(resp))
1107		return -ENOSPC;
1108
1109	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1110		return -EFAULT;
1111
1112	mutex_lock(&mut);
1113	mc = idr_find(&multicast_idr, cmd.id);
1114	if (!mc)
1115		mc = ERR_PTR(-ENOENT);
1116	else if (mc->ctx->file != file)
1117		mc = ERR_PTR(-EINVAL);
1118	else {
1119		idr_remove(&multicast_idr, mc->id);
1120		atomic_inc(&mc->ctx->ref);
1121	}
1122	mutex_unlock(&mut);
1123
1124	if (IS_ERR(mc)) {
1125		ret = PTR_ERR(mc);
1126		goto out;
1127	}
1128
1129	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
1130	mutex_lock(&mc->ctx->file->mut);
1131	ucma_cleanup_mc_events(mc);
1132	list_del(&mc->list);
1133	mutex_unlock(&mc->ctx->file->mut);
1134
1135	ucma_put_ctx(mc->ctx);
1136	resp.events_reported = mc->events_reported;
1137	kfree(mc);
1138
1139	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1140			 &resp, sizeof(resp)))
1141		ret = -EFAULT;
1142out:
1143	return ret;
1144}
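/*
 * Explanatory note: atomic_inc(&mc->ctx->ref), taken under the global
 * mutex above, pins the owning context so a concurrent ucma_destroy_id()
 * cannot release ctx->cm_id while rdma_leave_multicast() is still using
 * it; ucma_put_ctx() drops the pin once cleanup is complete.
 */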
1145
1146static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
1147{
1148	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
1149	if (file1 < file2) {
1150		mutex_lock(&file1->mut);
1151		mutex_lock(&file2->mut);
1152	} else {
1153		mutex_lock(&file2->mut);
1154		mutex_lock(&file1->mut);
1155	}
1156}
1157
1158static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
1159{
1160	if (file1 < file2) {
1161		mutex_unlock(&file2->mut);
1162		mutex_unlock(&file1->mut);
1163	} else {
1164		mutex_unlock(&file1->mut);
1165		mutex_unlock(&file2->mut);
1166	}
1167}
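/*
 * Explanatory note: taking the two file mutexes in pointer order (and
 * releasing them in reverse) establishes a global lock ordering, so two
 * threads migrating ids between the same pair of files in opposite
 * directions cannot deadlock.
 */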
1168
1169static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
1170{
1171	struct ucma_event *uevent, *tmp;
1172
1173	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
1174		if (uevent->ctx == ctx)
1175			list_move_tail(&uevent->list, &file->event_list);
1176}
1177
1178static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1179			       const char __user *inbuf,
1180			       int in_len, int out_len)
1181{
1182	struct rdma_ucm_migrate_id cmd;
1183	struct rdma_ucm_migrate_resp resp;
1184	struct ucma_context *ctx;
1185	struct file *filp;
1186	struct ucma_file *cur_file;
1187	int ret = 0;
1188
1189	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1190		return -EFAULT;
1191
1192	/* Get current fd to protect against it being closed */
1193	filp = fget(cmd.fd);
1194	if (!filp)
1195		return -ENOENT;
1196
1197	/* Validate current fd and prevent destruction of id. */
1198	ctx = ucma_get_ctx(filp->private_data, cmd.id);
1199	if (IS_ERR(ctx)) {
1200		ret = PTR_ERR(ctx);
1201		goto file_put;
1202	}
1203
1204	cur_file = ctx->file;
1205	if (cur_file == new_file) {
1206		resp.events_reported = ctx->events_reported;
1207		goto response;
1208	}
1209
1210	/*
1211	 * Migrate events between fd's, maintaining order, and avoiding new
1212	 * events being added before existing events.
1213	 */
1214	ucma_lock_files(cur_file, new_file);
1215	mutex_lock(&mut);
1216
1217	list_move_tail(&ctx->list, &new_file->ctx_list);
1218	ucma_move_events(ctx, new_file);
1219	ctx->file = new_file;
1220	resp.events_reported = ctx->events_reported;
1221
1222	mutex_unlock(&mut);
1223	ucma_unlock_files(cur_file, new_file);
1224
1225response:
1226	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1227			 &resp, sizeof(resp)))
1228		ret = -EFAULT;
1229
1230	ucma_put_ctx(ctx);
1231file_put:
1232	fput(filp);
1233	return ret;
1234}
1235
1236static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
1237				   const char __user *inbuf,
1238				   int in_len, int out_len) = {
1239	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
1240	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
1241	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
1242	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
1243	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
1244	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
1245	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
1246	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
1247	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
1248	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
1249	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
1250	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
1251	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
1252	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
1253	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
1254	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
1255	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
1256	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
1257	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
1258};
1259
1260static ssize_t ucma_write(struct file *filp, const char __user *buf,
1261			  size_t len, loff_t *pos)
1262{
1263	struct ucma_file *file = filp->private_data;
1264	struct rdma_ucm_cmd_hdr hdr;
1265	ssize_t ret;
1266
1267	if (len < sizeof(hdr))
1268		return -EINVAL;
1269
1270	if (copy_from_user(&hdr, buf, sizeof(hdr)))
1271		return -EFAULT;
1272
1273	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1274		return -EINVAL;
1275
1276	if (hdr.in + sizeof(hdr) > len)
1277		return -EINVAL;
1278
1279	if (!ucma_cmd_table[hdr.cmd])
1280		return -ENOSYS;
1281
1282	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
1283	if (!ret)
1284		ret = len;
1285
1286	return ret;
1287}
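/*
 * Usage sketch (illustrative only; real applications normally go
 * through librdmacm rather than this raw ABI): each request is a single
 * write() of an rdma_ucm_cmd_hdr followed by the command payload, e.g.
 * on a descriptor opened from /dev/infiniband/rdma_cm:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} req = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			 .in  = sizeof(req.cmd),
 *			 .out = sizeof(struct rdma_ucm_create_id_resp) },
 *	};
 *	write(fd, &req, sizeof(req));
 *
 * The response is copied back through the user pointer in cmd.response,
 * and asynchronous events are fetched with RDMA_USER_CM_CMD_GET_EVENT.
 */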
1288
1289static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
1290{
1291	struct ucma_file *file = filp->private_data;
1292	unsigned int mask = 0;
1293
1294	poll_wait(filp, &file->poll_wait, wait);
1295
1296	if (!list_empty(&file->event_list))
1297		mask = POLLIN | POLLRDNORM;
1298
1299	return mask;
1300}
1301
1302/*
1303 * ucma_open() does not need the BKL:
1304 *
1305 *  - no global state is referred to;
1306 *  - there is no ioctl method to race against;
1307 *  - no further module initialization is required for open to work
1308 *    after the device is registered.
1309 */
1310static int ucma_open(struct inode *inode, struct file *filp)
1311{
1312	struct ucma_file *file;
1313
1314	file = kmalloc(sizeof *file, GFP_KERNEL);
1315	if (!file)
1316		return -ENOMEM;
1317
1318	INIT_LIST_HEAD(&file->event_list);
1319	INIT_LIST_HEAD(&file->ctx_list);
1320	init_waitqueue_head(&file->poll_wait);
1321	mutex_init(&file->mut);
1322
1323	filp->private_data = file;
1324	file->filp = filp;
1325
1326	return nonseekable_open(inode, filp);
1327}
1328
1329static int ucma_close(struct inode *inode, struct file *filp)
1330{
1331	struct ucma_file *file = filp->private_data;
1332	struct ucma_context *ctx, *tmp;
1333
1334	mutex_lock(&file->mut);
1335	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
1336		mutex_unlock(&file->mut);
1337
1338		mutex_lock(&mut);
1339		idr_remove(&ctx_idr, ctx->id);
1340		mutex_unlock(&mut);
1341
1342		ucma_free_ctx(ctx);
1343		mutex_lock(&file->mut);
1344	}
1345	mutex_unlock(&file->mut);
1346	kfree(file);
1347	return 0;
1348}
1349
1350static const struct file_operations ucma_fops = {
1351	.owner 	 = THIS_MODULE,
1352	.open 	 = ucma_open,
1353	.release = ucma_close,
1354	.write	 = ucma_write,
1355	.poll    = ucma_poll,
1356	.llseek	 = no_llseek,
1357};
1358
1359static struct miscdevice ucma_misc = {
1360	.minor		= MISC_DYNAMIC_MINOR,
1361	.name		= "rdma_cm",
1362	.nodename	= "infiniband/rdma_cm",
1363	.mode		= 0666,
1364	.fops		= &ucma_fops,
1365};
1366
1367static ssize_t show_abi_version(struct device *dev,
1368				struct device_attribute *attr,
1369				char *buf)
1370{
1371	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
1372}
1373static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
1374
1375static int __init ucma_init(void)
1376{
1377	int ret;
1378
1379	ret = misc_register(&ucma_misc);
1380	if (ret)
1381		return ret;
1382
1383	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
1384	if (ret) {
1385		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
1386		goto err1;
1387	}
1388
1389	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
1390	if (!ucma_ctl_table_hdr) {
1391		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
1392		ret = -ENOMEM;
1393		goto err2;
1394	}
1395	return 0;
1396err2:
1397	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1398err1:
1399	misc_deregister(&ucma_misc);
1400	return ret;
1401}
1402
1403static void __exit ucma_cleanup(void)
1404{
1405	unregister_sysctl_table(ucma_ctl_table_hdr);
1406	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1407	misc_deregister(&ucma_misc);
1408	idr_destroy(&ctx_idr);
1409}
1410
1411module_init(ucma_init);
1412module_exit(ucma_cleanup);
v3.15 (drivers/infiniband/core/ucma.c)
   1/*
   2 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *	copyright notice, this list of conditions and the following
  16 *	disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *	copyright notice, this list of conditions and the following
  20 *	disclaimer in the documentation and/or other materials
  21 *	provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/completion.h>
  34#include <linux/file.h>
  35#include <linux/mutex.h>
  36#include <linux/poll.h>
  37#include <linux/sched.h>
  38#include <linux/idr.h>
  39#include <linux/in.h>
  40#include <linux/in6.h>
  41#include <linux/miscdevice.h>
  42#include <linux/slab.h>
  43#include <linux/sysctl.h>
  44#include <linux/module.h>
  45
  46#include <rdma/rdma_user_cm.h>
  47#include <rdma/ib_marshall.h>
  48#include <rdma/rdma_cm.h>
  49#include <rdma/rdma_cm_ib.h>
  50#include <rdma/ib_addr.h>
  51#include <rdma/ib.h>
  52
  53MODULE_AUTHOR("Sean Hefty");
  54MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
  55MODULE_LICENSE("Dual BSD/GPL");
  56
  57static unsigned int max_backlog = 1024;
  58
  59static struct ctl_table_header *ucma_ctl_table_hdr;
  60static struct ctl_table ucma_ctl_table[] = {
  61	{
  62		.procname	= "max_backlog",
  63		.data		= &max_backlog,
  64		.maxlen		= sizeof max_backlog,
  65		.mode		= 0644,
  66		.proc_handler	= proc_dointvec,
  67	},
  68	{ }
  69};
  70
  71struct ucma_file {
  72	struct mutex		mut;
  73	struct file		*filp;
  74	struct list_head	ctx_list;
  75	struct list_head	event_list;
  76	wait_queue_head_t	poll_wait;
  77};
  78
  79struct ucma_context {
  80	int			id;
  81	struct completion	comp;
  82	atomic_t		ref;
  83	int			events_reported;
  84	int			backlog;
  85
  86	struct ucma_file	*file;
  87	struct rdma_cm_id	*cm_id;
  88	u64			uid;
  89
  90	struct list_head	list;
  91	struct list_head	mc_list;
  92};
  93
  94struct ucma_multicast {
  95	struct ucma_context	*ctx;
  96	int			id;
  97	int			events_reported;
  98
  99	u64			uid;
 100	struct list_head	list;
 101	struct sockaddr_storage	addr;
 102};
 103
 104struct ucma_event {
 105	struct ucma_context	*ctx;
 106	struct ucma_multicast	*mc;
 107	struct list_head	list;
 108	struct rdma_cm_id	*cm_id;
 109	struct rdma_ucm_event_resp resp;
 110};
 111
 112static DEFINE_MUTEX(mut);
 113static DEFINE_IDR(ctx_idr);
 114static DEFINE_IDR(multicast_idr);
 115
 116static inline struct ucma_context *_ucma_find_context(int id,
 117						      struct ucma_file *file)
 118{
 119	struct ucma_context *ctx;
 120
 121	ctx = idr_find(&ctx_idr, id);
 122	if (!ctx)
 123		ctx = ERR_PTR(-ENOENT);
 124	else if (ctx->file != file)
 125		ctx = ERR_PTR(-EINVAL);
 126	return ctx;
 127}
 128
 129static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
 130{
 131	struct ucma_context *ctx;
 132
 133	mutex_lock(&mut);
 134	ctx = _ucma_find_context(id, file);
 135	if (!IS_ERR(ctx))
 136		atomic_inc(&ctx->ref);
 137	mutex_unlock(&mut);
 138	return ctx;
 139}
 140
 141static void ucma_put_ctx(struct ucma_context *ctx)
 142{
 143	if (atomic_dec_and_test(&ctx->ref))
 144		complete(&ctx->comp);
 145}
 146
 147static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 148{
 149	struct ucma_context *ctx;
 150
 151	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 152	if (!ctx)
 153		return NULL;
 154
 155	atomic_set(&ctx->ref, 1);
 156	init_completion(&ctx->comp);
 157	INIT_LIST_HEAD(&ctx->mc_list);
 158	ctx->file = file;
 159
 160	mutex_lock(&mut);
 161	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
 162	mutex_unlock(&mut);
 163	if (ctx->id < 0)
 164		goto error;
 165
 166	list_add_tail(&ctx->list, &file->ctx_list);
 167	return ctx;
 168
 169error:
 170	kfree(ctx);
 171	return NULL;
 172}
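/*
 * Explanatory note: relative to the v3.1 listing above, the
 * idr_pre_get()/idr_get_new() retry loop is gone.  idr_alloc(),
 * introduced in 3.9, allocates an id and installs the pointer in one
 * call, returning a negative errno on failure.
 */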
 173
 174static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
 175{
 176	struct ucma_multicast *mc;
 177
 178	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 179	if (!mc)
 180		return NULL;
 181
 182	mutex_lock(&mut);
 183	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
 184	mutex_unlock(&mut);
 185	if (mc->id < 0)
 186		goto error;
 187
 188	mc->ctx = ctx;
 189	list_add_tail(&mc->list, &ctx->mc_list);
 190	return mc;
 191
 192error:
 193	kfree(mc);
 194	return NULL;
 195}
 196
 197static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
 198				 struct rdma_conn_param *src)
 199{
 200	if (src->private_data_len)
 201		memcpy(dst->private_data, src->private_data,
 202		       src->private_data_len);
 203	dst->private_data_len = src->private_data_len;
  204	dst->responder_resources = src->responder_resources;
 205	dst->initiator_depth = src->initiator_depth;
 206	dst->flow_control = src->flow_control;
 207	dst->retry_count = src->retry_count;
 208	dst->rnr_retry_count = src->rnr_retry_count;
 209	dst->srq = src->srq;
 210	dst->qp_num = src->qp_num;
 211}
 212
 213static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
 214			       struct rdma_ud_param *src)
 215{
 216	if (src->private_data_len)
 217		memcpy(dst->private_data, src->private_data,
 218		       src->private_data_len);
 219	dst->private_data_len = src->private_data_len;
 220	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
 221	dst->qp_num = src->qp_num;
 222	dst->qkey = src->qkey;
 223}
 224
 225static void ucma_set_event_context(struct ucma_context *ctx,
 226				   struct rdma_cm_event *event,
 227				   struct ucma_event *uevent)
 228{
 229	uevent->ctx = ctx;
 230	switch (event->event) {
 231	case RDMA_CM_EVENT_MULTICAST_JOIN:
 232	case RDMA_CM_EVENT_MULTICAST_ERROR:
 233		uevent->mc = (struct ucma_multicast *)
 234			     event->param.ud.private_data;
 235		uevent->resp.uid = uevent->mc->uid;
 236		uevent->resp.id = uevent->mc->id;
 237		break;
 238	default:
 239		uevent->resp.uid = ctx->uid;
 240		uevent->resp.id = ctx->id;
 241		break;
 242	}
 243}
 244
 245static int ucma_event_handler(struct rdma_cm_id *cm_id,
 246			      struct rdma_cm_event *event)
 247{
 248	struct ucma_event *uevent;
 249	struct ucma_context *ctx = cm_id->context;
 250	int ret = 0;
 251
 252	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
 253	if (!uevent)
 254		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
 255
 256	mutex_lock(&ctx->file->mut);
 257	uevent->cm_id = cm_id;
 258	ucma_set_event_context(ctx, event, uevent);
 259	uevent->resp.event = event->event;
 260	uevent->resp.status = event->status;
 261	if (cm_id->qp_type == IB_QPT_UD)
 262		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
 263	else
 264		ucma_copy_conn_event(&uevent->resp.param.conn,
 265				     &event->param.conn);
 266
 267	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 268		if (!ctx->backlog) {
 269			ret = -ENOMEM;
 270			kfree(uevent);
 271			goto out;
 272		}
 273		ctx->backlog--;
 274	} else if (!ctx->uid || ctx->cm_id != cm_id) {
 275		/*
 276		 * We ignore events for new connections until userspace has set
 277		 * their context.  This can only happen if an error occurs on a
 278		 * new connection before the user accepts it.  This is okay,
 279		 * since the accept will just fail later.
 280		 */
 281		kfree(uevent);
 282		goto out;
 283	}
 284
 285	list_add_tail(&uevent->list, &ctx->file->event_list);
 286	wake_up_interruptible(&ctx->file->poll_wait);
 287out:
 288	mutex_unlock(&ctx->file->mut);
 289	return ret;
 290}
 291
 292static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
 293			      int in_len, int out_len)
 294{
 295	struct ucma_context *ctx;
 296	struct rdma_ucm_get_event cmd;
 297	struct ucma_event *uevent;
 298	int ret = 0;
 299
 300	if (out_len < sizeof uevent->resp)
 301		return -ENOSPC;
 302
 303	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 304		return -EFAULT;
 305
 306	mutex_lock(&file->mut);
 307	while (list_empty(&file->event_list)) {
 308		mutex_unlock(&file->mut);
 309
 310		if (file->filp->f_flags & O_NONBLOCK)
 311			return -EAGAIN;
 312
 313		if (wait_event_interruptible(file->poll_wait,
 314					     !list_empty(&file->event_list)))
 315			return -ERESTARTSYS;
 316
 317		mutex_lock(&file->mut);
 318	}
 319
 320	uevent = list_entry(file->event_list.next, struct ucma_event, list);
 321
 322	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 323		ctx = ucma_alloc_ctx(file);
 324		if (!ctx) {
 325			ret = -ENOMEM;
 326			goto done;
 327		}
 328		uevent->ctx->backlog++;
 329		ctx->cm_id = uevent->cm_id;
 330		ctx->cm_id->context = ctx;
 331		uevent->resp.id = ctx->id;
 332	}
 333
 334	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 335			 &uevent->resp, sizeof uevent->resp)) {
 336		ret = -EFAULT;
 337		goto done;
 338	}
 339
 340	list_del(&uevent->list);
 341	uevent->ctx->events_reported++;
 342	if (uevent->mc)
 343		uevent->mc->events_reported++;
 344	kfree(uevent);
 345done:
 346	mutex_unlock(&file->mut);
 347	return ret;
 348}
 349
 350static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
 351{
 352	switch (cmd->ps) {
 353	case RDMA_PS_TCP:
 354		*qp_type = IB_QPT_RC;
 355		return 0;
 356	case RDMA_PS_UDP:
 357	case RDMA_PS_IPOIB:
 358		*qp_type = IB_QPT_UD;
 359		return 0;
 360	case RDMA_PS_IB:
 361		*qp_type = cmd->qp_type;
 362		return 0;
 363	default:
 364		return -EINVAL;
 365	}
 366}
 367
 368static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 369			      int in_len, int out_len)
 370{
 371	struct rdma_ucm_create_id cmd;
 372	struct rdma_ucm_create_id_resp resp;
 373	struct ucma_context *ctx;
 374	enum ib_qp_type qp_type;
 375	int ret;
 376
 377	if (out_len < sizeof(resp))
 378		return -ENOSPC;
 379
 380	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 381		return -EFAULT;
 382
 383	ret = ucma_get_qp_type(&cmd, &qp_type);
 384	if (ret)
 385		return ret;
 386
 387	mutex_lock(&file->mut);
 388	ctx = ucma_alloc_ctx(file);
 389	mutex_unlock(&file->mut);
 390	if (!ctx)
 391		return -ENOMEM;
 392
 393	ctx->uid = cmd.uid;
 394	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
 395	if (IS_ERR(ctx->cm_id)) {
 396		ret = PTR_ERR(ctx->cm_id);
 397		goto err1;
 398	}
 399
 400	resp.id = ctx->id;
 401	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 402			 &resp, sizeof(resp))) {
 403		ret = -EFAULT;
 404		goto err2;
 405	}
 406	return 0;
 407
 408err2:
 409	rdma_destroy_id(ctx->cm_id);
 410err1:
 411	mutex_lock(&mut);
 412	idr_remove(&ctx_idr, ctx->id);
 413	mutex_unlock(&mut);
 414	kfree(ctx);
 415	return ret;
 416}
 417
 418static void ucma_cleanup_multicast(struct ucma_context *ctx)
 419{
 420	struct ucma_multicast *mc, *tmp;
 421
 422	mutex_lock(&mut);
 423	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
 424		list_del(&mc->list);
 425		idr_remove(&multicast_idr, mc->id);
 426		kfree(mc);
 427	}
 428	mutex_unlock(&mut);
 429}
 430
 431static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 432{
 433	struct ucma_event *uevent, *tmp;
 434
 435	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
 436		if (uevent->mc != mc)
 437			continue;
 438
 439		list_del(&uevent->list);
 440		kfree(uevent);
 441	}
 442}
 443
 444/*
 445 * We cannot hold file->mut when calling rdma_destroy_id() or we can
 446 * deadlock.  We also acquire file->mut in ucma_event_handler(), and
 447 * rdma_destroy_id() will wait until all callbacks have completed.
 448 */
 449static int ucma_free_ctx(struct ucma_context *ctx)
 450{
 451	int events_reported;
 452	struct ucma_event *uevent, *tmp;
 453	LIST_HEAD(list);
 454
 455	/* No new events will be generated after destroying the id. */
 456	rdma_destroy_id(ctx->cm_id);
 457
 458	ucma_cleanup_multicast(ctx);
 459
 460	/* Cleanup events not yet reported to the user. */
 461	mutex_lock(&ctx->file->mut);
 462	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
 463		if (uevent->ctx == ctx)
 464			list_move_tail(&uevent->list, &list);
 465	}
 466	list_del(&ctx->list);
 467	mutex_unlock(&ctx->file->mut);
 468
 469	list_for_each_entry_safe(uevent, tmp, &list, list) {
 470		list_del(&uevent->list);
 471		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
 472			rdma_destroy_id(uevent->cm_id);
 473		kfree(uevent);
 474	}
 475
 476	events_reported = ctx->events_reported;
 477	kfree(ctx);
 478	return events_reported;
 479}
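/*
 * Explanatory note: pending events are spliced onto a local list so
 * that the rdma_destroy_id() calls for unaccepted connect requests run
 * after file->mut has been dropped, honouring the locking rule in the
 * comment above ucma_free_ctx().
 */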
 480
 481static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
 482			       int in_len, int out_len)
 483{
 484	struct rdma_ucm_destroy_id cmd;
 485	struct rdma_ucm_destroy_id_resp resp;
 486	struct ucma_context *ctx;
 487	int ret = 0;
 488
 489	if (out_len < sizeof(resp))
 490		return -ENOSPC;
 491
 492	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 493		return -EFAULT;
 494
 495	mutex_lock(&mut);
 496	ctx = _ucma_find_context(cmd.id, file);
 497	if (!IS_ERR(ctx))
 498		idr_remove(&ctx_idr, ctx->id);
 499	mutex_unlock(&mut);
 500
 501	if (IS_ERR(ctx))
 502		return PTR_ERR(ctx);
 503
 504	ucma_put_ctx(ctx);
 505	wait_for_completion(&ctx->comp);
 506	resp.events_reported = ucma_free_ctx(ctx);
 507
 508	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 509			 &resp, sizeof(resp)))
 510		ret = -EFAULT;
 511
 512	return ret;
 513}
 514
 515static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
 516			      int in_len, int out_len)
 517{
 518	struct rdma_ucm_bind_ip cmd;
 519	struct ucma_context *ctx;
 520	int ret;
 521
 522	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 523		return -EFAULT;
 524
 525	ctx = ucma_get_ctx(file, cmd.id);
 526	if (IS_ERR(ctx))
 527		return PTR_ERR(ctx);
 528
 529	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
 530	ucma_put_ctx(ctx);
 531	return ret;
 532}
 533
 534static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
 535			 int in_len, int out_len)
 536{
 537	struct rdma_ucm_bind cmd;
 538	struct sockaddr *addr;
 539	struct ucma_context *ctx;
 540	int ret;
 541
 542	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 543		return -EFAULT;
 544
 545	addr = (struct sockaddr *) &cmd.addr;
 546	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
 547		return -EINVAL;
 548
 549	ctx = ucma_get_ctx(file, cmd.id);
 550	if (IS_ERR(ctx))
 551		return PTR_ERR(ctx);
 552
 553	ret = rdma_bind_addr(ctx->cm_id, addr);
 554	ucma_put_ctx(ctx);
 555	return ret;
 556}
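/*
 * Explanatory note: unlike the older ucma_bind_ip() entry point, this
 * handler validates the user-supplied addr_size against rdma_addr_size()
 * for the given address family before calling rdma_bind_addr(),
 * rejecting malformed sockaddrs with -EINVAL.
 */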
 557
 558static ssize_t ucma_resolve_ip(struct ucma_file *file,
 559			       const char __user *inbuf,
 560			       int in_len, int out_len)
 561{
 562	struct rdma_ucm_resolve_ip cmd;
 563	struct ucma_context *ctx;
 564	int ret;
 565
 566	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 567		return -EFAULT;
 568
 569	ctx = ucma_get_ctx(file, cmd.id);
 570	if (IS_ERR(ctx))
 571		return PTR_ERR(ctx);
 572
 573	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 574				(struct sockaddr *) &cmd.dst_addr,
 575				cmd.timeout_ms);
 576	ucma_put_ctx(ctx);
 577	return ret;
 578}
 579
 580static ssize_t ucma_resolve_addr(struct ucma_file *file,
 581				 const char __user *inbuf,
 582				 int in_len, int out_len)
 583{
 584	struct rdma_ucm_resolve_addr cmd;
 585	struct sockaddr *src, *dst;
 586	struct ucma_context *ctx;
 587	int ret;
 588
 589	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 590		return -EFAULT;
 591
 592	src = (struct sockaddr *) &cmd.src_addr;
 593	dst = (struct sockaddr *) &cmd.dst_addr;
 594	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
 595	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
 596		return -EINVAL;
 597
 598	ctx = ucma_get_ctx(file, cmd.id);
 599	if (IS_ERR(ctx))
 600		return PTR_ERR(ctx);
 601
 602	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
 603	ucma_put_ctx(ctx);
 604	return ret;
 605}
 606
 607static ssize_t ucma_resolve_route(struct ucma_file *file,
 608				  const char __user *inbuf,
 609				  int in_len, int out_len)
 610{
 611	struct rdma_ucm_resolve_route cmd;
 612	struct ucma_context *ctx;
 613	int ret;
 614
 615	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 616		return -EFAULT;
 617
 618	ctx = ucma_get_ctx(file, cmd.id);
 619	if (IS_ERR(ctx))
 620		return PTR_ERR(ctx);
 621
 622	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
 623	ucma_put_ctx(ctx);
 624	return ret;
 625}
 626
 627static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 628			       struct rdma_route *route)
 629{
 630	struct rdma_dev_addr *dev_addr;
 631
 632	resp->num_paths = route->num_paths;
 633	switch (route->num_paths) {
 634	case 0:
 635		dev_addr = &route->addr.dev_addr;
 636		rdma_addr_get_dgid(dev_addr,
 637				   (union ib_gid *) &resp->ib_route[0].dgid);
 638		rdma_addr_get_sgid(dev_addr,
 639				   (union ib_gid *) &resp->ib_route[0].sgid);
 640		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
 641		break;
 642	case 2:
 643		ib_copy_path_rec_to_user(&resp->ib_route[1],
 644					 &route->path_rec[1]);
 645		/* fall through */
 646	case 1:
 647		ib_copy_path_rec_to_user(&resp->ib_route[0],
 648					 &route->path_rec[0]);
 649		break;
 650	default:
 651		break;
 652	}
 653}
 654
 655static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
 656				 struct rdma_route *route)
 657{
 658
 659	resp->num_paths = route->num_paths;
 660	switch (route->num_paths) {
 661	case 0:
 662		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
 663			    (union ib_gid *)&resp->ib_route[0].dgid);
 664		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
 665			    (union ib_gid *)&resp->ib_route[0].sgid);
 666	resp->ib_route[0].pkey = cpu_to_be16(0xffff);
 667		break;
 668	case 2:
 669		ib_copy_path_rec_to_user(&resp->ib_route[1],
 670					 &route->path_rec[1]);
 671		/* fall through */
 672	case 1:
 673		ib_copy_path_rec_to_user(&resp->ib_route[0],
 674					 &route->path_rec[0]);
 675		break;
 676	default:
 677		break;
 678	}
 679}
 680
 681static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
 682			       struct rdma_route *route)
 683{
 684	struct rdma_dev_addr *dev_addr;
 685
 686	dev_addr = &route->addr.dev_addr;
 687	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
 688	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
 689}
 690
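/*
 * Report the bound source/destination addresses plus transport-specific
 * route information.  The route format depends on the port's transport
 * and link layer: IB, IBoE (RoCE), or iWarp.
 */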
 691static ssize_t ucma_query_route(struct ucma_file *file,
 692				const char __user *inbuf,
 693				int in_len, int out_len)
 694{
 695	struct rdma_ucm_query cmd;
 696	struct rdma_ucm_query_route_resp resp;
 697	struct ucma_context *ctx;
 698	struct sockaddr *addr;
 699	int ret = 0;
 700
 701	if (out_len < sizeof(resp))
 702		return -ENOSPC;
 703
 704	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 705		return -EFAULT;
 706
 707	ctx = ucma_get_ctx(file, cmd.id);
 708	if (IS_ERR(ctx))
 709		return PTR_ERR(ctx);
 710
 711	memset(&resp, 0, sizeof resp);
 712	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 713	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 714				     sizeof(struct sockaddr_in) :
 715				     sizeof(struct sockaddr_in6));
 716	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 717	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 718				     sizeof(struct sockaddr_in) :
 719				     sizeof(struct sockaddr_in6));
 720	if (!ctx->cm_id->device)
 721		goto out;
 722
 723	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 724	resp.port_num = ctx->cm_id->port_num;
 725	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
 726	case RDMA_TRANSPORT_IB:
 727		switch (rdma_port_get_link_layer(ctx->cm_id->device,
 728			ctx->cm_id->port_num)) {
 729		case IB_LINK_LAYER_INFINIBAND:
 730			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
 731			break;
 732		case IB_LINK_LAYER_ETHERNET:
 733			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
 734			break;
 735		default:
 736			break;
 737		}
 738		break;
 739	case RDMA_TRANSPORT_IWARP:
 740		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
 741		break;
 742	default:
 743		break;
 744	}
 745
 746out:
 747	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 748			 &resp, sizeof(resp)))
 749		ret = -EFAULT;
 750
 751	ucma_put_ctx(ctx);
 752	return ret;
 753}
 754
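/* Fill in device-specific response fields once the cm_id is bound to a device. */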
 755static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
 756				   struct rdma_ucm_query_addr_resp *resp)
 757{
 758	if (!cm_id->device)
 759		return;
 760
 761	resp->node_guid = (__force __u64) cm_id->device->node_guid;
 762	resp->port_num = cm_id->port_num;
 763	resp->pkey = (__force __u16) cpu_to_be16(
 764		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
 765}
 766
 767static ssize_t ucma_query_addr(struct ucma_context *ctx,
 768			       void __user *response, int out_len)
 769{
 770	struct rdma_ucm_query_addr_resp resp;
 771	struct sockaddr *addr;
 772	int ret = 0;
 773
 774	if (out_len < sizeof(resp))
 775		return -ENOSPC;
 776
 777	memset(&resp, 0, sizeof resp);
 778
 779	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 780	resp.src_size = rdma_addr_size(addr);
 781	memcpy(&resp.src_addr, addr, resp.src_size);
 782
 783	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 784	resp.dst_size = rdma_addr_size(addr);
 785	memcpy(&resp.dst_addr, addr, resp.dst_size);
 786
 787	ucma_query_device_addr(ctx->cm_id, &resp);
 788
 789	if (copy_to_user(response, &resp, sizeof(resp)))
 790		ret = -EFAULT;
 791
 792	return ret;
 793}
 794
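/*
 * Return the packed path records.  The response buffer is allocated
 * with out_len bytes, so only as many records as fit in the user's
 * buffer are packed and copied back.
 */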
 795static ssize_t ucma_query_path(struct ucma_context *ctx,
 796			       void __user *response, int out_len)
 797{
 798	struct rdma_ucm_query_path_resp *resp;
 799	int i, ret = 0;
 800
 801	if (out_len < sizeof(*resp))
 802		return -ENOSPC;
 803
 804	resp = kzalloc(out_len, GFP_KERNEL);
 805	if (!resp)
 806		return -ENOMEM;
 807
 808	resp->num_paths = ctx->cm_id->route.num_paths;
 809	for (i = 0, out_len -= sizeof(*resp);
 810	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
 811	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
 812
 813		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
 814					   IB_PATH_BIDIRECTIONAL;
 815		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
 816				&resp->path_data[i].path_rec);
 817	}
 818
 819	if (copy_to_user(response, resp,
 820			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
 821		ret = -EFAULT;
 822
 823	kfree(resp);
 824	return ret;
 825}
 826
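/*
 * Report the source and destination as AF_IB addresses.  If the cm_id
 * is not bound to AF_IB addresses, equivalent sockaddr_ib structures
 * are synthesized from the device address and service ID.
 */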
 827static ssize_t ucma_query_gid(struct ucma_context *ctx,
 828			      void __user *response, int out_len)
 829{
 830	struct rdma_ucm_query_addr_resp resp;
 831	struct sockaddr_ib *addr;
 832	int ret = 0;
 833
 834	if (out_len < sizeof(resp))
 835		return -ENOSPC;
 836
 837	memset(&resp, 0, sizeof resp);
 838
 839	ucma_query_device_addr(ctx->cm_id, &resp);
 840
 841	addr = (struct sockaddr_ib *) &resp.src_addr;
 842	resp.src_size = sizeof(*addr);
 843	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
 844		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
 845	} else {
 846		addr->sib_family = AF_IB;
 847		addr->sib_pkey = (__force __be16) resp.pkey;
 848		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
 849				   (union ib_gid *) &addr->sib_addr);
 850		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
 851						    &ctx->cm_id->route.addr.src_addr);
 852	}
 853
 854	addr = (struct sockaddr_ib *) &resp.dst_addr;
 855	resp.dst_size = sizeof(*addr);
 856	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
 857		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
 858	} else {
 859		addr->sib_family = AF_IB;
 860		addr->sib_pkey = (__force __be16) resp.pkey;
 861		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
 862				   (union ib_gid *) &addr->sib_addr);
 863		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
 864						    &ctx->cm_id->route.addr.dst_addr);
 865	}
 866
 867	if (copy_to_user(response, &resp, sizeof(resp)))
 868		ret = -EFAULT;
 869
 870	return ret;
 871}
 872
 873static ssize_t ucma_query(struct ucma_file *file,
 874			  const char __user *inbuf,
 875			  int in_len, int out_len)
 876{
 877	struct rdma_ucm_query cmd;
 878	struct ucma_context *ctx;
 879	void __user *response;
 880	int ret;
 881
 882	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 883		return -EFAULT;
 884
 885	response = (void __user *)(unsigned long) cmd.response;
 886	ctx = ucma_get_ctx(file, cmd.id);
 887	if (IS_ERR(ctx))
 888		return PTR_ERR(ctx);
 889
 890	switch (cmd.option) {
 891	case RDMA_USER_CM_QUERY_ADDR:
 892		ret = ucma_query_addr(ctx, response, out_len);
 893		break;
 894	case RDMA_USER_CM_QUERY_PATH:
 895		ret = ucma_query_path(ctx, response, out_len);
 896		break;
 897	case RDMA_USER_CM_QUERY_GID:
 898		ret = ucma_query_gid(ctx, response, out_len);
 899		break;
 900	default:
 901		ret = -ENOSYS;
 902		break;
 903	}
 904
 905	ucma_put_ctx(ctx);
 906	return ret;
 907}
 908
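/* Note: the qkey is only meaningful for AF_IB; it is forced to zero otherwise. */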
 909static void ucma_copy_conn_param(struct rdma_cm_id *id,
 910				 struct rdma_conn_param *dst,
 911				 struct rdma_ucm_conn_param *src)
 912{
 913	dst->private_data = src->private_data;
 914	dst->private_data_len = src->private_data_len;
 915	dst->responder_resources = src->responder_resources;
 916	dst->initiator_depth = src->initiator_depth;
 917	dst->flow_control = src->flow_control;
 918	dst->retry_count = src->retry_count;
 919	dst->rnr_retry_count = src->rnr_retry_count;
 920	dst->srq = src->srq;
 921	dst->qp_num = src->qp_num;
 922	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
 923}
 924
 925static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 926			    int in_len, int out_len)
 927{
 928	struct rdma_ucm_connect cmd;
 929	struct rdma_conn_param conn_param;
 930	struct ucma_context *ctx;
 931	int ret;
 932
 933	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 934		return -EFAULT;
 935
 936	if (!cmd.conn_param.valid)
 937		return -EINVAL;
 938
 939	ctx = ucma_get_ctx(file, cmd.id);
 940	if (IS_ERR(ctx))
 941		return PTR_ERR(ctx);
 942
 943	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 944	ret = rdma_connect(ctx->cm_id, &conn_param);
 945	ucma_put_ctx(ctx);
 946	return ret;
 947}
 948
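/*
 * Start listening for connection requests.  A zero or out-of-range
 * backlog is replaced with the max_backlog sysctl limit
 * (net.rdma_ucm.max_backlog).
 */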
 949static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
 950			   int in_len, int out_len)
 951{
 952	struct rdma_ucm_listen cmd;
 953	struct ucma_context *ctx;
 954	int ret;
 955
 956	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 957		return -EFAULT;
 958
 959	ctx = ucma_get_ctx(file, cmd.id);
 960	if (IS_ERR(ctx))
 961		return PTR_ERR(ctx);
 962
 963	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
 964		       cmd.backlog : max_backlog;
 965	ret = rdma_listen(ctx->cm_id, ctx->backlog);
 966	ucma_put_ctx(ctx);
 967	return ret;
 968}
 969
 970static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 971			   int in_len, int out_len)
 972{
 973	struct rdma_ucm_accept cmd;
 974	struct rdma_conn_param conn_param;
 975	struct ucma_context *ctx;
 976	int ret;
 977
 978	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 979		return -EFAULT;
 980
 981	ctx = ucma_get_ctx(file, cmd.id);
 982	if (IS_ERR(ctx))
 983		return PTR_ERR(ctx);
 984
 985	if (cmd.conn_param.valid) {
 986		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 987		mutex_lock(&file->mut);
 988		ret = rdma_accept(ctx->cm_id, &conn_param);
 989		if (!ret)
 990			ctx->uid = cmd.uid;
 991		mutex_unlock(&file->mut);
 992	} else
 993		ret = rdma_accept(ctx->cm_id, NULL);
 994
 995	ucma_put_ctx(ctx);
 996	return ret;
 997}
 998
 999static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
1000			   int in_len, int out_len)
1001{
1002	struct rdma_ucm_reject cmd;
1003	struct ucma_context *ctx;
1004	int ret;
1005
1006	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1007		return -EFAULT;
1008
1009	ctx = ucma_get_ctx(file, cmd.id);
1010	if (IS_ERR(ctx))
1011		return PTR_ERR(ctx);
1012
1013	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
1014	ucma_put_ctx(ctx);
1015	return ret;
1016}
1017
1018static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
1019			       int in_len, int out_len)
1020{
1021	struct rdma_ucm_disconnect cmd;
1022	struct ucma_context *ctx;
1023	int ret;
1024
1025	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1026		return -EFAULT;
1027
1028	ctx = ucma_get_ctx(file, cmd.id);
1029	if (IS_ERR(ctx))
1030		return PTR_ERR(ctx);
1031
1032	ret = rdma_disconnect(ctx->cm_id);
1033	ucma_put_ctx(ctx);
1034	return ret;
1035}
1036
1037static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1038				 const char __user *inbuf,
1039				 int in_len, int out_len)
1040{
1041	struct rdma_ucm_init_qp_attr cmd;
1042	struct ib_uverbs_qp_attr resp;
1043	struct ucma_context *ctx;
1044	struct ib_qp_attr qp_attr;
1045	int ret;
1046
1047	if (out_len < sizeof(resp))
1048		return -ENOSPC;
1049
1050	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1051		return -EFAULT;
1052
1053	ctx = ucma_get_ctx(file, cmd.id);
1054	if (IS_ERR(ctx))
1055		return PTR_ERR(ctx);
1056
1057	resp.qp_attr_mask = 0;
1058	memset(&qp_attr, 0, sizeof qp_attr);
1059	qp_attr.qp_state = cmd.qp_state;
1060	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
1061	if (ret)
1062		goto out;
1063
1064	ib_copy_qp_attr_to_user(&resp, &qp_attr);
1065	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1066			 &resp, sizeof(resp)))
1067		ret = -EFAULT;
1068
1069out:
1070	ucma_put_ctx(ctx);
1071	return ret;
1072}
1073
1074static int ucma_set_option_id(struct ucma_context *ctx, int optname,
1075			      void *optval, size_t optlen)
1076{
1077	int ret = 0;
1078
1079	switch (optname) {
1080	case RDMA_OPTION_ID_TOS:
1081		if (optlen != sizeof(u8)) {
1082			ret = -EINVAL;
1083			break;
1084		}
1085		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
1086		break;
1087	case RDMA_OPTION_ID_REUSEADDR:
1088		if (optlen != sizeof(int)) {
1089			ret = -EINVAL;
1090			break;
1091		}
1092		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
1093		break;
1094	case RDMA_OPTION_ID_AFONLY:
1095		if (optlen != sizeof(int)) {
1096			ret = -EINVAL;
1097			break;
1098		}
1099		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
1100		break;
1101	default:
1102		ret = -ENOSYS;
1103	}
1104
1105	return ret;
1106}
1107
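/*
 * Install a userspace-supplied IB path: scan the array for the primary
 * bidirectional GMP path, hand it to the CM, and synthesize a
 * ROUTE_RESOLVED event so userspace sees the usual state transition.
 */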
1108static int ucma_set_ib_path(struct ucma_context *ctx,
1109			    struct ib_path_rec_data *path_data, size_t optlen)
1110{
1111	struct ib_sa_path_rec sa_path;
1112	struct rdma_cm_event event;
1113	int ret;
1114
1115	if (optlen % sizeof(*path_data))
1116		return -EINVAL;
1117
1118	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
1119		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
1120					 IB_PATH_BIDIRECTIONAL))
1121			break;
1122	}
1123
1124	if (!optlen)
1125		return -EINVAL;
1126
1127	ib_sa_unpack_path(path_data->path_rec, &sa_path);
1128	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
1129	if (ret)
1130		return ret;
1131
1132	memset(&event, 0, sizeof event);
1133	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1134	return ucma_event_handler(ctx->cm_id, &event);
1135}
1136
1137static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
1138			      void *optval, size_t optlen)
1139{
1140	int ret;
1141
1142	switch (optname) {
1143	case RDMA_OPTION_IB_PATH:
1144		ret = ucma_set_ib_path(ctx, optval, optlen);
1145		break;
1146	default:
1147		ret = -ENOSYS;
1148	}
1149
1150	return ret;
1151}
1152
1153static int ucma_set_option_level(struct ucma_context *ctx, int level,
1154				 int optname, void *optval, size_t optlen)
1155{
1156	int ret;
1157
1158	switch (level) {
1159	case RDMA_OPTION_ID:
1160		ret = ucma_set_option_id(ctx, optname, optval, optlen);
1161		break;
1162	case RDMA_OPTION_IB:
1163		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
1164		break;
1165	default:
1166		ret = -ENOSYS;
1167	}
1168
1169	return ret;
1170}
1171
1172static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1173			       int in_len, int out_len)
1174{
1175	struct rdma_ucm_set_option cmd;
1176	struct ucma_context *ctx;
1177	void *optval;
1178	int ret;
1179
1180	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1181		return -EFAULT;
1182
	/* Cap optlen before memdup_user(); a huge value would otherwise be
	 * handed straight to kmalloc(). */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

1183	ctx = ucma_get_ctx(file, cmd.id);
1184	if (IS_ERR(ctx))
1185		return PTR_ERR(ctx);
1186
1187	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1188			     cmd.optlen);
1189	if (IS_ERR(optval)) {
1190		ret = PTR_ERR(optval);
1191		goto out;
1192	}
1193
1194	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
1195				    cmd.optlen);
1196	kfree(optval);
1197
1198out:
1199	ucma_put_ctx(ctx);
1200	return ret;
1201}
1202
1203static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1204			   int in_len, int out_len)
1205{
1206	struct rdma_ucm_notify cmd;
1207	struct ucma_context *ctx;
1208	int ret;
1209
1210	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1211		return -EFAULT;
1212
1213	ctx = ucma_get_ctx(file, cmd.id);
1214	if (IS_ERR(ctx))
1215		return PTR_ERR(ctx);
1216
1217	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
1218	ucma_put_ctx(ctx);
1219	return ret;
1220}
1221
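/*
 * Common handler for both multicast join commands: validate the address,
 * allocate the tracking structure, and join.  On failure the join is
 * unwound in reverse order under the file mutex.
 */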
1222static ssize_t ucma_process_join(struct ucma_file *file,
1223				 struct rdma_ucm_join_mcast *cmd, int out_len)
1224{
1225	struct rdma_ucm_create_id_resp resp;
1226	struct ucma_context *ctx;
1227	struct ucma_multicast *mc;
1228	struct sockaddr *addr;
1229	int ret;
1230
1231	if (out_len < sizeof(resp))
1232		return -ENOSPC;
1233
1234	addr = (struct sockaddr *) &cmd->addr;
1235	if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
1236		return -EINVAL;
1237
1238	ctx = ucma_get_ctx(file, cmd->id);
1239	if (IS_ERR(ctx))
1240		return PTR_ERR(ctx);
1241
1242	mutex_lock(&file->mut);
1243	mc = ucma_alloc_multicast(ctx);
1244	if (!mc) {
1245		ret = -ENOMEM;
1246		goto err1;
1247	}
1248
1249	mc->uid = cmd->uid;
1250	memcpy(&mc->addr, addr, cmd->addr_size);
1251	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
1252	if (ret)
1253		goto err2;
1254
1255	resp.id = mc->id;
1256	if (copy_to_user((void __user *)(unsigned long) cmd->response,
1257			 &resp, sizeof(resp))) {
1258		ret = -EFAULT;
1259		goto err3;
1260	}
1261
1262	mutex_unlock(&file->mut);
1263	ucma_put_ctx(ctx);
1264	return 0;
1265
1266err3:
1267	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
1268	ucma_cleanup_mc_events(mc);
1269err2:
1270	mutex_lock(&mut);
1271	idr_remove(&multicast_idr, mc->id);
1272	mutex_unlock(&mut);
1273	list_del(&mc->list);
1274	kfree(mc);
1275err1:
1276	mutex_unlock(&file->mut);
1277	ucma_put_ctx(ctx);
1278	return ret;
1279}
1280
1281static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1282				      const char __user *inbuf,
1283				      int in_len, int out_len)
1284{
1285	struct rdma_ucm_join_ip_mcast cmd;
1286	struct rdma_ucm_join_mcast join_cmd;
1287
1288	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1289		return -EFAULT;
1290
1291	join_cmd.response = cmd.response;
1292	join_cmd.uid = cmd.uid;
1293	join_cmd.id = cmd.id;
1294	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
1295	join_cmd.reserved = 0;
1296	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
1297
1298	return ucma_process_join(file, &join_cmd, out_len);
1299}
1300
1301static ssize_t ucma_join_multicast(struct ucma_file *file,
1302				   const char __user *inbuf,
1303				   int in_len, int out_len)
1304{
1305	struct rdma_ucm_join_mcast cmd;
1306
1307	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1308		return -EFAULT;
1309
1310	return ucma_process_join(file, &cmd, out_len);
1311}
1312
1313static ssize_t ucma_leave_multicast(struct ucma_file *file,
1314				    const char __user *inbuf,
1315				    int in_len, int out_len)
1316{
1317	struct rdma_ucm_destroy_id cmd;
1318	struct rdma_ucm_destroy_id_resp resp;
1319	struct ucma_multicast *mc;
1320	int ret = 0;
1321
1322	if (out_len < sizeof(resp))
1323		return -ENOSPC;
1324
1325	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1326		return -EFAULT;
1327
1328	mutex_lock(&mut);
1329	mc = idr_find(&multicast_idr, cmd.id);
1330	if (!mc)
1331		mc = ERR_PTR(-ENOENT);
1332	else if (mc->ctx->file != file)
1333		mc = ERR_PTR(-EINVAL);
1334	else {
1335		idr_remove(&multicast_idr, mc->id);
1336		atomic_inc(&mc->ctx->ref);
1337	}
1338	mutex_unlock(&mut);
1339
1340	if (IS_ERR(mc)) {
1341		ret = PTR_ERR(mc);
1342		goto out;
1343	}
1344
1345	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
1346	mutex_lock(&mc->ctx->file->mut);
1347	ucma_cleanup_mc_events(mc);
1348	list_del(&mc->list);
1349	mutex_unlock(&mc->ctx->file->mut);
1350
1351	ucma_put_ctx(mc->ctx);
1352	resp.events_reported = mc->events_reported;
1353	kfree(mc);
1354
1355	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1356			 &resp, sizeof(resp)))
1357		ret = -EFAULT;
1358out:
1359	return ret;
1360}
1361
1362static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
1363{
1364	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
1365	if (file1 < file2) {
1366		mutex_lock(&file1->mut);
1367		mutex_lock(&file2->mut);
1368	} else {
1369		mutex_lock(&file2->mut);
1370		mutex_lock(&file1->mut);
1371	}
1372}
1373
1374static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
1375{
1376	if (file1 < file2) {
1377		mutex_unlock(&file2->mut);
1378		mutex_unlock(&file1->mut);
1379	} else {
1380		mutex_unlock(&file1->mut);
1381		mutex_unlock(&file2->mut);
1382	}
1383}
1384
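/* Move ctx's queued events to the destination file, preserving their order. */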
1385static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
1386{
1387	struct ucma_event *uevent, *tmp;
1388
1389	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
1390		if (uevent->ctx == ctx)
1391			list_move_tail(&uevent->list, &file->event_list);
1392}
1393
static const struct file_operations ucma_fops;

1394static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1395			       const char __user *inbuf,
1396			       int in_len, int out_len)
1397{
1398	struct rdma_ucm_migrate_id cmd;
1399	struct rdma_ucm_migrate_resp resp;
1400	struct ucma_context *ctx;
1401	struct fd f;
1402	struct ucma_file *cur_file;
1403	int ret = 0;
1404
1405	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1406		return -EFAULT;
1407
1408	/* Get current fd to protect against it being closed */
1409	f = fdget(cmd.fd);
1410	if (!f.file)
1411		return -ENOENT;

	/* Reject fds that are not rdma_cm fds: private_data is only a
	 * struct ucma_file for our own file_operations. */
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}
1412
1413	/* Validate current fd and prevent destruction of id. */
1414	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
1415	if (IS_ERR(ctx)) {
1416		ret = PTR_ERR(ctx);
1417		goto file_put;
1418	}
1419
1420	cur_file = ctx->file;
1421	if (cur_file == new_file) {
1422		resp.events_reported = ctx->events_reported;
1423		goto response;
1424	}
1425
1426	/*
1427	 * Migrate events between fds, maintaining order, and avoiding new
1428	 * events being added before existing events.
1429	 */
1430	ucma_lock_files(cur_file, new_file);
1431	mutex_lock(&mut);
1432
1433	list_move_tail(&ctx->list, &new_file->ctx_list);
1434	ucma_move_events(ctx, new_file);
1435	ctx->file = new_file;
1436	resp.events_reported = ctx->events_reported;
1437
1438	mutex_unlock(&mut);
1439	ucma_unlock_files(cur_file, new_file);
1440
1441response:
1442	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1443			 &resp, sizeof(resp)))
1444		ret = -EFAULT;
1445
1446	ucma_put_ctx(ctx);
1447file_put:
1448	fdput(f);
1449	return ret;
1450}
1451
1452static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
1453				   const char __user *inbuf,
1454				   int in_len, int out_len) = {
1455	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
1456	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
1457	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
1458	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
1459	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
1460	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
1461	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
1462	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
1463	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
1464	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
1465	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
1466	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
1467	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
1468	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
1469	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
1470	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
1471	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
1472	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
1473	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
1474	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
1475	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
1476	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
1477	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
1478};
1479
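/*
 * All commands arrive through write(): parse the header, validate the
 * opcode and length, and dispatch through ucma_cmd_table.  On success
 * the full write length is reported as consumed.
 */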
1480static ssize_t ucma_write(struct file *filp, const char __user *buf,
1481			  size_t len, loff_t *pos)
1482{
1483	struct ucma_file *file = filp->private_data;
1484	struct rdma_ucm_cmd_hdr hdr;
1485	ssize_t ret;
1486
1487	if (len < sizeof(hdr))
1488		return -EINVAL;
1489
1490	if (copy_from_user(&hdr, buf, sizeof(hdr)))
1491		return -EFAULT;
1492
1493	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1494		return -EINVAL;
1495
1496	if (hdr.in + sizeof(hdr) > len)
1497		return -EINVAL;
1498
1499	if (!ucma_cmd_table[hdr.cmd])
1500		return -ENOSYS;
1501
1502	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
1503	if (!ret)
1504		ret = len;
1505
1506	return ret;
1507}
1508
1509static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
1510{
1511	struct ucma_file *file = filp->private_data;
1512	unsigned int mask = 0;
1513
1514	poll_wait(filp, &file->poll_wait, wait);
1515
1516	if (!list_empty(&file->event_list))
1517		mask = POLLIN | POLLRDNORM;
1518
1519	return mask;
1520}
1521
1522/*
1523 * ucma_open() does not need the BKL:
1524 *
1525 *  - no global state is referred to;
1526 *  - there is no ioctl method to race against;
1527 *  - no further module initialization is required for open to work
1528 *    after the device is registered.
1529 */
1530static int ucma_open(struct inode *inode, struct file *filp)
1531{
1532	struct ucma_file *file;
1533
1534	file = kmalloc(sizeof *file, GFP_KERNEL);
1535	if (!file)
1536		return -ENOMEM;
1537
1538	INIT_LIST_HEAD(&file->event_list);
1539	INIT_LIST_HEAD(&file->ctx_list);
1540	init_waitqueue_head(&file->poll_wait);
1541	mutex_init(&file->mut);
1542
1543	filp->private_data = file;
1544	file->filp = filp;
1545
1546	return nonseekable_open(inode, filp);
1547}
1548
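/*
 * Tear down every context on the file.  file->mut is dropped around the
 * teardown of each context, since ucma_free_ctx() itself needs file->mut
 * while flushing the context's unreported events.
 */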
1549static int ucma_close(struct inode *inode, struct file *filp)
1550{
1551	struct ucma_file *file = filp->private_data;
1552	struct ucma_context *ctx, *tmp;
1553
1554	mutex_lock(&file->mut);
1555	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
1556		mutex_unlock(&file->mut);
1557
1558		mutex_lock(&mut);
1559		idr_remove(&ctx_idr, ctx->id);
1560		mutex_unlock(&mut);
1561
1562		ucma_free_ctx(ctx);
1563		mutex_lock(&file->mut);
1564	}
1565	mutex_unlock(&file->mut);
1566	kfree(file);
1567	return 0;
1568}
1569
1570static const struct file_operations ucma_fops = {
1571	.owner 	 = THIS_MODULE,
1572	.open 	 = ucma_open,
1573	.release = ucma_close,
1574	.write	 = ucma_write,
1575	.poll    = ucma_poll,
1576	.llseek	 = no_llseek,
1577};
1578
1579static struct miscdevice ucma_misc = {
1580	.minor		= MISC_DYNAMIC_MINOR,
1581	.name		= "rdma_cm",
1582	.nodename	= "infiniband/rdma_cm",
1583	.mode		= 0666,
1584	.fops		= &ucma_fops,
1585};
1586
1587static ssize_t show_abi_version(struct device *dev,
1588				struct device_attribute *attr,
1589				char *buf)
1590{
1591	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
1592}
1593static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
1594
1595static int __init ucma_init(void)
1596{
1597	int ret;
1598
1599	ret = misc_register(&ucma_misc);
1600	if (ret)
1601		return ret;
1602
1603	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
1604	if (ret) {
1605		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
1606		goto err1;
1607	}
1608
1609	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
1610	if (!ucma_ctl_table_hdr) {
1611		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
1612		ret = -ENOMEM;
1613		goto err2;
1614	}
1615	return 0;
1616err2:
1617	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1618err1:
1619	misc_deregister(&ucma_misc);
1620	return ret;
1621}
1622
1623static void __exit ucma_cleanup(void)
1624{
1625	unregister_net_sysctl_table(ucma_ctl_table_hdr);
1626	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1627	misc_deregister(&ucma_misc);
1628	idr_destroy(&ctx_idr);
1629}
1630
1631module_init(ucma_init);
1632module_exit(ucma_cleanup);