Linux Audio

Check our new training course

Loading...
v3.1
   1/*
   2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   4 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
   5 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/file.h>
  37#include <linux/fs.h>
  38#include <linux/slab.h>
  39
  40#include <asm/uaccess.h>
  41
  42#include "uverbs.h"
 
  43
/*
 * Per-object-type lockdep classes: each kind of uobject gets its own
 * rwsem class so lockdep can distinguish, e.g., a PD's mutex from a
 * CQ's when they are held nested during a command such as reg_mr.
 */
static struct lock_class_key pd_lock_key;
static struct lock_class_key mr_lock_key;
static struct lock_class_key cq_lock_key;
static struct lock_class_key qp_lock_key;
static struct lock_class_key ah_lock_key;
static struct lock_class_key srq_lock_key;

/*
 * Fill in a struct ib_udata describing the user-space buffers for a
 * command: ibuf/obuf are the user pointers just past the fixed-size
 * command/response headers, ilen/olen the remaining byte counts.
 */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)
  58
  59/*
  60 * The ib_uobject locking scheme is as follows:
  61 *
  62 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
  63 *   needs to be held during all idr operations.  When an object is
  64 *   looked up, a reference must be taken on the object's kref before
  65 *   dropping this lock.
  66 *
  67 * - Each object also has an rwsem.  This rwsem must be held for
  68 *   reading while an operation that uses the object is performed.
  69 *   For example, while registering an MR, the associated PD's
  70 *   uobject.mutex must be held for reading.  The rwsem must be held
  71 *   for writing while initializing or destroying an object.
  72 *
  73 * - In addition, each object has a "live" flag.  If this flag is not
  74 *   set, then lookups of the object will fail even if it is found in
  75 *   the idr.  This handles a reader that blocks and does not acquire
  76 *   the rwsem until after the object is destroyed.  The destroy
  77 *   operation will set the live flag to 0 and then drop the rwsem;
  78 *   this will allow the reader to acquire the rwsem, see that the
  79 *   live flag is 0, and then drop the rwsem and its reference to
  80 *   object.  The underlying storage will not be freed until the last
  81 *   reference to the object is dropped.
  82 */
  83
/*
 * Initialize a freshly allocated uobject: record the caller's opaque
 * user handle and owning ucontext, set the refcount to 1, and set up
 * the per-object rwsem with the type-specific lockdep class.  The
 * object starts with live == 0, so idr lookups fail until the create
 * path finishes and flips it to 1 (see locking comment above).
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct lock_class_key *key)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class(&uobj->mutex, key);
	uobj->live        = 0;
}
  94
  95static void release_uobj(struct kref *kref)
  96{
  97	kfree(container_of(kref, struct ib_uobject, ref));
  98}
  99
/* Drop one reference; frees the uobject when the last ref goes away. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
 104
/* Release the read lock and the reference taken by idr_read_uobj(). */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
 110
/* Release the write lock and the reference taken by idr_write_uobj(). */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
 116
/*
 * Insert a uobject into the given idr, storing the allocated id in
 * uobj->id.  Uses the classic idr_pre_get()/idr_get_new() protocol:
 * preallocate outside the spinlock, then retry if another thread
 * consumed the preallocated node (-EAGAIN).  Returns 0 on success
 * or -ENOMEM if preallocation fails.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_get_new(idr, uobj, &uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);

	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
 134
/*
 * Remove a uobject from the idr under ib_uverbs_idr_lock.  Does not
 * drop the object's reference; callers do that separately.
 */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
 141
 142static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 143					 struct ib_ucontext *context)
 144{
 145	struct ib_uobject *uobj;
 146
 147	spin_lock(&ib_uverbs_idr_lock);
 148	uobj = idr_find(idr, id);
 149	if (uobj) {
 150		if (uobj->context == context)
 151			kref_get(&uobj->ref);
 152		else
 153			uobj = NULL;
 154	}
 155	spin_unlock(&ib_uverbs_idr_lock);
 156
 157	return uobj;
 158}
 159
/*
 * Look up a uobject and take its rwsem for reading.  "nested" selects
 * down_read_nested() with SINGLE_DEPTH_NESTING for the one place two
 * objects of the same lock class are held at once (a QP's send and
 * recv CQs — see ib_uverbs_create_qp).  An object whose live flag has
 * been cleared by a racing destroy is treated as not found.
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
 180
/*
 * Look up a uobject and take its rwsem for writing (used by destroy
 * paths).  Returns NULL if the id is not found, belongs to another
 * context, or the object is no longer live.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
 198
 199static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
 200			  int nested)
 201{
 202	struct ib_uobject *uobj;
 203
 204	uobj = idr_read_uobj(idr, id, context, nested);
 205	return uobj ? uobj->object : NULL;
 206}
 207
/* Look up a PD by handle with its uobject read-locked and referenced. */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
 212
/* Release the lock and reference taken by idr_read_pd(). */
static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}
 217
/* Look up a CQ by handle; "nested" is passed through for lockdep. */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
 222
/* Release the lock and reference taken by idr_read_cq(). */
static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}
 227
/* Look up an AH by handle with its uobject read-locked and referenced. */
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
 232
/* Release the lock and reference taken by idr_read_ah(). */
static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}
 237
/* Look up a QP by handle with its uobject read-locked and referenced. */
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
 242
 
 
 
 
 
 
 
 
/* Release the lock and reference taken by idr_read_qp(). */
static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}
 247
 
 
 
 
 
/* Look up an SRQ by handle with its uobject read-locked and referenced. */
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
 252
/* Release the lock and reference taken by idr_read_srq(). */
static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
 257
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * GET_CONTEXT command: allocate the per-process ucontext for this
 * uverbs device, create the async event file, and return its fd and
 * the number of completion vectors to user space.
 *
 * file->mutex is held across the whole operation so that at most one
 * ucontext is ever created per uverbs file.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	/* Only one ucontext per file. */
	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	/* Reserve an fd for the async event file before creating it. */
	ret = get_unused_fd();
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	/* Copy the response out before committing any visible state. */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	/* fd_install() is the point of no return for the async fd. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
 353
/*
 * QUERY_DEVICE command: fetch the device attributes via
 * ib_query_device() and translate struct ib_device_attr field by
 * field into the fixed user-space ABI struct.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	/* Zero first so reserved/padding bytes don't leak kernel stack. */
	memset(&resp, 0, sizeof resp);

	resp.fw_ver 		       = attr.fw_ver;
	resp.node_guid 		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid 	       = attr.sys_image_guid;
	resp.max_mr_size 	       = attr.max_mr_size;
	resp.page_size_cap 	       = attr.page_size_cap;
	resp.vendor_id 		       = attr.vendor_id;
	resp.vendor_part_id 	       = attr.vendor_part_id;
	resp.hw_ver 		       = attr.hw_ver;
	resp.max_qp 		       = attr.max_qp;
	resp.max_qp_wr 		       = attr.max_qp_wr;
	resp.device_cap_flags 	       = attr.device_cap_flags;
	resp.max_sge 		       = attr.max_sge;
	resp.max_sge_rd 	       = attr.max_sge_rd;
	resp.max_cq 		       = attr.max_cq;
	resp.max_cqe 		       = attr.max_cqe;
	resp.max_mr 		       = attr.max_mr;
	resp.max_pd 		       = attr.max_pd;
	resp.max_qp_rd_atom 	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom 	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom 	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap 	       = attr.atomic_cap;
	resp.max_ee 		       = attr.max_ee;
	resp.max_rdd 		       = attr.max_rdd;
	resp.max_mw 		       = attr.max_mw;
	resp.max_raw_ipv6_qp 	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp 	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp 	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah 		       = attr.max_ah;
	resp.max_fmr 		       = attr.max_fmr;
	resp.max_map_per_fmr 	       = attr.max_map_per_fmr;
	resp.max_srq 		       = attr.max_srq;
	resp.max_srq_wr 	       = attr.max_srq_wr;
	resp.max_srq_sge 	       = attr.max_srq_sge;
	resp.max_pkeys 		       = attr.max_pkeys;
	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 422
/*
 * QUERY_PORT command: fetch the attributes of one port via
 * ib_query_port() and translate them into the user-space ABI struct,
 * adding the link layer type queried separately.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	/* Zero first so reserved/padding bytes don't leak kernel stack. */
	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 472
/*
 * ALLOC_PD command: create a protection domain and return an idr
 * handle for it.
 *
 * Follows the standard create protocol (see the locking comment at
 * the top of the file): the new uobject is held write-locked while it
 * is built, inserted in the idr, reported to user space, and linked
 * on the per-context list; only then is it marked live and unlocked.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Creation complete: make the object visible to lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
 546
/*
 * DEALLOC_PD command: destroy a protection domain by handle.
 *
 * Standard destroy protocol: take the uobject write-locked, destroy
 * the underlying PD, clear the live flag only on success, then remove
 * the handle from the idr and the per-context list and drop the final
 * reference.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
 581
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * REG_MR command: register a user memory region against a PD and
 * return its handle plus lkey/rkey.
 *
 * The PD is held read-locked (idr_read_pd) for the duration of the
 * registration; the new MR uobject follows the standard create
 * protocol (write-locked until marked live).
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* The virtual address must share the user buffer's page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	/* Creation complete: make the object visible to lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
 682
/*
 * DEREG_MR command: deregister a memory region by handle, following
 * the standard destroy protocol (write-lock, destroy, clear live on
 * success, then unlink and drop the final reference).
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
 720
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * CREATE_COMP_CHANNEL command: create a completion event file and
 * return its fd.  The fd is reserved first, the response copied out,
 * and only then is the file installed (fd_install is the point of no
 * return).
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd();
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
 757
/*
 * CREATE_CQ command: create a completion queue, optionally bound to a
 * completion event channel (cmd.comp_channel >= 0), and return its
 * handle and actual CQE count.
 *
 * The CQ uobject follows the standard create protocol: write-locked
 * until fully constructed and marked live.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
	down_write(&obj->uobject.mutex);

	/* A negative comp_channel means no completion channel. */
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	/* Creation complete: make the object visible to lookups. */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
 858
/*
 * RESIZE_CQ command: resize an existing CQ via the device's resize_cq
 * method and report the resulting CQE count.
 *
 * Note that only resp.cqe is copied back (sizeof resp.cqe, not sizeof
 * resp) — presumably a deliberate ABI quirk preserving any trailing
 * driver-specific response data; do not "fix" without checking the
 * user-space counterpart.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
 895
 896static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
 897{
 898	struct ib_uverbs_wc tmp;
 899
 900	tmp.wr_id		= wc->wr_id;
 901	tmp.status		= wc->status;
 902	tmp.opcode		= wc->opcode;
 903	tmp.vendor_err		= wc->vendor_err;
 904	tmp.byte_len		= wc->byte_len;
 905	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
 906	tmp.qp_num		= wc->qp->qp_num;
 907	tmp.src_qp		= wc->src_qp;
 908	tmp.wc_flags		= wc->wc_flags;
 909	tmp.pkey_index		= wc->pkey_index;
 910	tmp.slid		= wc->slid;
 911	tmp.sl			= wc->sl;
 912	tmp.dlid_path_bits	= wc->dlid_path_bits;
 913	tmp.port_num		= wc->port_num;
 914	tmp.reserved		= 0;
 915
 916	if (copy_to_user(dest, &tmp, sizeof tmp))
 917		return -EFAULT;
 918
 919	return 0;
 920}
 921
/*
 * POLL_CQ command: poll up to cmd.ne completions from a CQ, copying
 * each one to user space individually right after the response
 * header, then writing the header (with the final count) last.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		/* Poll one completion at a time to bound stack usage. */
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
 972
 973ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
 974				const char __user *buf, int in_len,
 975				int out_len)
 976{
 977	struct ib_uverbs_req_notify_cq cmd;
 978	struct ib_cq                  *cq;
 979
 980	if (copy_from_user(&cmd, buf, sizeof cmd))
 981		return -EFAULT;
 982
 983	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
 984	if (!cq)
 985		return -EINVAL;
 986
 987	ib_req_notify_cq(cq, cmd.solicited_only ?
 988			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
 989
 990	put_cq_read(cq);
 991
 992	return in_len;
 993}
 994
/*
 * DESTROY_CQ command: destroy a CQ by handle and report how many
 * completion and async events had been delivered for it.
 *
 * Standard destroy protocol, plus cleanup of any events still queued
 * on the CQ's event file (ib_uverbs_release_ucq) before the counts
 * are read for the response.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1046
/*
 * Handle the CREATE_QP uverbs command: resolve the PD, send/recv CQs and
 * optional SRQ from their user handles, ask the device driver to create
 * the QP, register it in the QP idr and return the new handle plus the
 * (possibly adjusted) capabilities to userspace.
 *
 * Returns in_len on success or a negative errno.  On any failure every
 * read-lock taken on the PD/CQs/SRQ is dropped and the half-built
 * uobject is released via the err_* unwind ladder.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_pd                   *pd;
	struct ib_cq                   *scq, *rcq;
	struct ib_srq                  *srq;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private input/output follows the fixed command/response. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject starts write-locked until fully published below. */
	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	/*
	 * Take read locks on every referenced object.  If send and recv
	 * CQ handles are the same, the CQ is only read-locked once and
	 * rcq aliases scq (see the matching put_cq_read below).
	 */
	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);

	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	/* Fill in the core fields the driver does not set itself. */
	qp->device     	  = pd->device;
	qp->pd         	  = pd;
	qp->send_cq    	  = attr.send_cq;
	qp->recv_cq    	  = attr.recv_cq;
	qp->srq	       	  = attr.srq;
	qp->uobject       = &obj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type	  = attr.qp_type;
	/* QP holds references on its PD, CQs and SRQ for its lifetime. */
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	/* attr.cap may have been rounded up by the driver; report it back. */
	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Success: drop the temporary read locks taken above. */
	put_pd_read(pd);
	put_cq_read(scq);
	if (rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* Mark the uobject live only once it is fully set up and visible. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	/* Any lookup above may have failed, so each put is conditional. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1186
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Handle the QUERY_QP uverbs command: query the QP's current attributes
 * and init attributes from the driver and marshal them into the fixed
 * userspace response layout.
 *
 * attr/init_attr are heap-allocated because together they are too large
 * to put on the kernel stack.  Returns in_len on success or a negative
 * errno.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	/* kfree(NULL) is fine, so a single cleanup path handles both. */
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	/* The read lock is only needed for the query itself. */
	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	/* Primary path address vector. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	/* Alternate path address vector. */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
1286
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Handle the MODIFY_QP uverbs command: unmarshal the attribute set from
 * userspace into an ib_qp_attr and pass it to the device driver's
 * modify_qp verb together with the user's attr_mask.
 *
 * attr is heap-allocated to keep it off the kernel stack.  Only the
 * fields selected by cmd.attr_mask are meaningful to the driver; the
 * rest are copied but ignored.  Returns in_len on success or a negative
 * errno.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* No response struct for this command; outbuf is NULL. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	/* Primary path address vector. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	    	    = cmd.dest.sl;
	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;

	/* Alternate path address vector. */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;

	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}
1373
/*
 * Handle the DESTROY_QP uverbs command: write-lock the QP uobject,
 * refuse if multicast groups are still attached, destroy the QP,
 * unpublish the uobject and report the number of asynchronous events
 * that were delivered for it.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp               	*qp;
	struct ib_uqp_object        	*obj;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* Takes the uobject write-locked; released via put_uobj_write. */
	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* Userspace must detach all multicast groups before destroy. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;	/* dead: concurrent lookups now fail */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any pending async events queued for this QP. */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1428
/*
 * Handle the POST_SEND uverbs command: unmarshal a chain of send work
 * requests (plus their scatter/gather lists) from userspace, post them
 * on the QP, and report how many WRs were consumed before the first
 * failure via resp.bad_wr.
 *
 * Layout of the input buffer after the fixed command:
 *   wr_count WQEs of wqe_size bytes each, then sge_count ib_uverbs_sge
 *   entries.  Each kernel WR is allocated with its sg_list placed
 *   immediately after it (ib_sge-aligned).
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Sanity-check the user's claimed sizes against in_len. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer reused for each user WQE copy. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* Running SGE total must stay within what the user declared. */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* WR and its sg_list in one allocation, sg_list aligned. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* UD sends carry an AH; hold a read ref until cleanup. */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through: WITH_IMM also needs rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			/* sg_list lives in the same allocation as the WR. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp, wr, &bad_wr);
	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the WR chain, dropping AH read refs taken for UD sends. */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
1584
/*
 * Unmarshal a userspace chain of receive work requests into a linked
 * list of kernel ib_recv_wr structures.  buf points just past the
 * fixed command header: wr_count WQEs of wqe_size bytes each, followed
 * by sge_count ib_uverbs_sge entries.
 *
 * Each WR is allocated together with its sg_list (ib_sge-aligned,
 * directly after the WR).  On success returns the head of the chain,
 * which the caller must kfree() element by element; on failure the
 * partial chain is freed here and an ERR_PTR is returned.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	/* Validate the claimed sizes against the bytes actually provided. */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer reused for each user WQE copy. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		/* Running SGE total must stay within sge_count. */
		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			/* sg_list lives in the same allocation as the WR. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* Free whatever part of the chain was built before the failure. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
1669
/*
 * Handle the POST_RECV uverbs command: unmarshal a chain of receive
 * work requests, post them on the QP, and report how many WRs were
 * consumed before the first failure via resp.bad_wr.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* The unmarshalled chain is owned here; free it on all paths. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
1718
/*
 * Handle the POST_SRQ_RECV uverbs command: identical to post_recv but
 * targets a shared receive queue rather than a QP's receive queue.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* The unmarshalled chain is owned here; free it on all paths. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
1767
/*
 * Handle the CREATE_AH uverbs command: build an ib_ah_attr from the
 * user's address parameters, create the address handle on the PD,
 * register it in the AH idr and return its handle.
 *
 * Returns in_len on success or a negative errno; partial state is
 * unwound through the err_* label ladder in reverse acquisition order.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* New uobject starts write-locked until fully published below. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Mark the uobject live only once it is fully set up and visible. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
1857
1858ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
1859			     const char __user *buf, int in_len, int out_len)
1860{
1861	struct ib_uverbs_destroy_ah cmd;
1862	struct ib_ah		   *ah;
1863	struct ib_uobject	   *uobj;
1864	int			    ret;
1865
1866	if (copy_from_user(&cmd, buf, sizeof cmd))
1867		return -EFAULT;
1868
1869	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
1870	if (!uobj)
1871		return -EINVAL;
1872	ah = uobj->object;
1873
1874	ret = ib_destroy_ah(ah);
1875	if (!ret)
1876		uobj->live = 0;
1877
1878	put_uobj_write(uobj);
1879
1880	if (ret)
1881		return ret;
1882
1883	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
1884
1885	mutex_lock(&file->mutex);
1886	list_del(&uobj->list);
1887	mutex_unlock(&file->mutex);
1888
1889	put_uobj(uobj);
1890
1891	return in_len;
1892}
1893
/*
 * Handle the ATTACH_MCAST uverbs command: attach the QP to a multicast
 * group (gid, mlid) and record the membership on the uobject's
 * mcast_list so it can be detached on QP destruction.
 *
 * Attaching a group that is already on the list is a successful no-op,
 * making the command idempotent.  Returns in_len on success or a
 * negative errno.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Already attached?  Report success without re-attaching. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);	/* attach failed; entry never published */

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}
1940
/*
 * Handle the DETACH_MCAST uverbs command: detach the QP from a
 * multicast group (gid, mlid) and remove the matching bookkeeping
 * entry from the uobject's mcast_list.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	/* Detach in hardware first; only then drop the bookkeeping. */
	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}
1977
/*
 * Handle the CREATE_SRQ uverbs command: resolve the PD, ask the device
 * driver to create a shared receive queue, register it in the SRQ idr
 * and return the new handle plus the (possibly adjusted) limits.
 *
 * Returns in_len on success or a negative errno; partial state is
 * unwound through the err_* label ladder in reverse acquisition order.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	struct ib_uevent_object         *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_srq_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private input/output follows the fixed command/response. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject starts write-locked until fully published below. */
	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
	down_write(&obj->uobject.mutex);

	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.attr.max_wr    = cmd.max_wr;
	attr.attr.max_sge   = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	obj->events_reported     = 0;
	INIT_LIST_HEAD(&obj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the core fields the driver does not set itself. */
	srq->device    	   = pd->device;
	srq->pd        	   = pd;
	srq->uobject       = &obj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;
	/* The SRQ holds a reference on its PD for its lifetime. */
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
	if (ret)
		goto err_destroy;

	/* attr.attr may have been rounded up by the driver; report it back. */
	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Mark the uobject live only once it is fully set up and visible. */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
2078
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2079ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2080			     const char __user *buf, int in_len,
2081			     int out_len)
2082{
2083	struct ib_uverbs_modify_srq cmd;
2084	struct ib_udata             udata;
2085	struct ib_srq              *srq;
2086	struct ib_srq_attr          attr;
2087	int                         ret;
2088
2089	if (copy_from_user(&cmd, buf, sizeof cmd))
2090		return -EFAULT;
2091
2092	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2093		   out_len);
2094
2095	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2096	if (!srq)
2097		return -EINVAL;
2098
2099	attr.max_wr    = cmd.max_wr;
2100	attr.srq_limit = cmd.srq_limit;
2101
2102	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
2103
2104	put_srq_read(srq);
2105
2106	return ret ? ret : in_len;
2107}
2108
2109ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
2110			    const char __user *buf,
2111			    int in_len, int out_len)
2112{
2113	struct ib_uverbs_query_srq      cmd;
2114	struct ib_uverbs_query_srq_resp resp;
2115	struct ib_srq_attr              attr;
2116	struct ib_srq                   *srq;
2117	int                             ret;
2118
2119	if (out_len < sizeof resp)
2120		return -ENOSPC;
2121
2122	if (copy_from_user(&cmd, buf, sizeof cmd))
2123		return -EFAULT;
2124
2125	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2126	if (!srq)
2127		return -EINVAL;
2128
2129	ret = ib_query_srq(srq, &attr);
2130
2131	put_srq_read(srq);
2132
2133	if (ret)
2134		return ret;
2135
2136	memset(&resp, 0, sizeof resp);
2137
2138	resp.max_wr    = attr.max_wr;
2139	resp.max_sge   = attr.max_sge;
2140	resp.srq_limit = attr.srq_limit;
2141
2142	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2143			 &resp, sizeof resp))
2144		return -EFAULT;
2145
2146	return in_len;
2147}
2148
/*
 * Handle the DESTROY_SRQ uverbs command: write-lock the SRQ uobject,
 * destroy the SRQ, unpublish the uobject and report the number of
 * asynchronous events that were delivered for it.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Takes the uobject write-locked; released via put_uobj_write. */
	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;	/* dead: concurrent lookups now fail */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any pending async events queued for this SRQ. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
v3.15
   1/*
   2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   4 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
   5 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/file.h>
  37#include <linux/fs.h>
  38#include <linux/slab.h>
  39
  40#include <asm/uaccess.h>
  41
  42#include "uverbs.h"
  43#include "core_priv.h"
  44
/*
 * One lockdep class per uobject type, so that legitimately holding the
 * rwsems of two different object types at once (e.g. a PD's while
 * creating a QP) is not reported as recursive locking.  The name shows
 * up in lockdep reports.
 */
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
  59
  60/*
  61 * The ib_uobject locking scheme is as follows:
  62 *
  63 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
  64 *   needs to be held during all idr operations.  When an object is
  65 *   looked up, a reference must be taken on the object's kref before
  66 *   dropping this lock.
  67 *
  68 * - Each object also has an rwsem.  This rwsem must be held for
  69 *   reading while an operation that uses the object is performed.
  70 *   For example, while registering an MR, the associated PD's
  71 *   uobject.mutex must be held for reading.  The rwsem must be held
  72 *   for writing while initializing or destroying an object.
  73 *
  74 * - In addition, each object has a "live" flag.  If this flag is not
  75 *   set, then lookups of the object will fail even if it is found in
  76 *   the idr.  This handles a reader that blocks and does not acquire
  77 *   the rwsem until after the object is destroyed.  The destroy
  78 *   operation will set the live flag to 0 and then drop the rwsem;
  79 *   this will allow the reader to acquire the rwsem, see that the
  80 *   live flag is 0, and then drop the rwsem and its reference to
  81 *   object.  The underlying storage will not be freed until the last
  82 *   reference to the object is dropped.
  83 */
  84
  85static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
  86		      struct ib_ucontext *context, struct uverbs_lock_class *c)
  87{
  88	uobj->user_handle = user_handle;
  89	uobj->context     = context;
  90	kref_init(&uobj->ref);
  91	init_rwsem(&uobj->mutex);
  92	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
  93	uobj->live        = 0;
  94}
  95
  96static void release_uobj(struct kref *kref)
  97{
  98	kfree(container_of(kref, struct ib_uobject, ref));
  99}
 100
/* Drop one reference; the uobject is freed when the last ref goes away. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
 105
/* Release a uobject obtained via idr_read_uobj(): drop the read lock
 * and the reference taken at lookup time. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
 111
/* Release a uobject obtained via idr_write_uobj(): drop the write lock
 * and the reference taken at lookup time. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
 117
/*
 * Insert a uobject into the given idr, storing the allocated id in
 * uobj->id.  Returns 0 on success or a negative errno.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	/* Preload outside the spinlock so the allocation inside can use
	 * GFP_NOWAIT without failing spuriously. */
	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
 134
/* Remove a uobject from the given idr; does not drop any reference. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
 141
/*
 * Look up a uobject by id and take a reference on it, all under
 * ib_uverbs_idr_lock so the object cannot be freed between idr_find()
 * and kref_get().  Rejects handles that belong to a different ucontext
 * (prevents one process using another's handles via the same device).
 *
 * Returns the referenced uobject, or NULL if not found / wrong context.
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}
 159
/*
 * Look up a uobject and return it with its rwsem held for reading, or
 * NULL.  "nested" selects down_read_nested() for the cases where two
 * objects of the same lockdep class are legitimately read-locked at
 * once.  A uobject whose "live" flag has been cleared by a concurrent
 * destroy is treated as not found.
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
 180
/*
 * Look up a uobject and return it with its rwsem held for writing
 * (used by destroy paths), or NULL.  As with idr_read_uobj(), an
 * object already marked not-live counts as not found.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
 198
 199static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
 200			  int nested)
 201{
 202	struct ib_uobject *uobj;
 203
 204	uobj = idr_read_uobj(idr, id, context, nested);
 205	return uobj ? uobj->object : NULL;
 206}
 207
/*
 * Typed convenience wrappers around idr_read_obj()/idr_write_uobj().
 * Each idr_read_*() returns the object with its uobject rwsem held for
 * reading (or NULL), and the matching put_*_read() drops that lock and
 * the lookup reference.  idr_write_qp()/put_qp_write() are the
 * write-locked equivalents used on QP destroy paths.
 */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

/* "nested" allows read-locking two CQs at once (e.g. QP send+recv CQ). */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

/* Also hands back the uobject itself: XRCD callers need it for refcounting. */
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
 282
/*
 * ib_uverbs_get_context() - handle the GET_CONTEXT command.
 *
 * Creates the per-process ib_ucontext via the driver's alloc_ucontext,
 * sets up the async event file, and returns its fd plus the number of
 * completion vectors to userspace.  At most one context may be created
 * per uverbs file; file->mutex is held across the whole setup so a
 * concurrent caller sees either no context or a fully built one.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	/* Reserve the fd now, but only fd_install() once everything below
	 * has succeeded, so userspace can never see a half-built fd. */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	/* Held by the event file and the context respectively; dropped on
	 * their teardown paths (in uverbs_main). */
	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
 380
/*
 * ib_uverbs_query_device() - handle the QUERY_DEVICE command.
 *
 * Read-only query: fetches the device attributes via ib_query_device()
 * and copies them field by field into the fixed-layout user response.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	/* Zero first so struct padding never leaks kernel stack to user. */
	memset(&resp, 0, sizeof resp);

	resp.fw_ver 		       = attr.fw_ver;
	resp.node_guid 		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid 	       = attr.sys_image_guid;
	resp.max_mr_size 	       = attr.max_mr_size;
	resp.page_size_cap 	       = attr.page_size_cap;
	resp.vendor_id 		       = attr.vendor_id;
	resp.vendor_part_id 	       = attr.vendor_part_id;
	resp.hw_ver 		       = attr.hw_ver;
	resp.max_qp 		       = attr.max_qp;
	resp.max_qp_wr 		       = attr.max_qp_wr;
	resp.device_cap_flags 	       = attr.device_cap_flags;
	resp.max_sge 		       = attr.max_sge;
	resp.max_sge_rd 	       = attr.max_sge_rd;
	resp.max_cq 		       = attr.max_cq;
	resp.max_cqe 		       = attr.max_cqe;
	resp.max_mr 		       = attr.max_mr;
	resp.max_pd 		       = attr.max_pd;
	resp.max_qp_rd_atom 	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom 	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom 	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap 	       = attr.atomic_cap;
	resp.max_ee 		       = attr.max_ee;
	resp.max_rdd 		       = attr.max_rdd;
	resp.max_mw 		       = attr.max_mw;
	resp.max_raw_ipv6_qp 	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp 	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp 	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah 		       = attr.max_ah;
	resp.max_fmr 		       = attr.max_fmr;
	resp.max_map_per_fmr 	       = attr.max_map_per_fmr;
	resp.max_srq 		       = attr.max_srq;
	resp.max_srq_wr 	       = attr.max_srq_wr;
	resp.max_srq_sge 	       = attr.max_srq_sge;
	resp.max_pkeys 		       = attr.max_pkeys;
	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 449
/*
 * ib_uverbs_query_port() - handle the QUERY_PORT command.
 *
 * Read-only query: fetches the attributes of the requested port via
 * ib_query_port() and copies them into the user response, including
 * the port's link layer type.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* cmd.port_num is validated inside ib_query_port(). */
	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	/* Zero first so struct padding never leaks kernel stack to user. */
	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 499
/*
 * ib_uverbs_alloc_pd() - handle the ALLOC_PD command.
 *
 * Allocates a protection domain via the driver and publishes it to
 * userspace as an idr handle.  The new uobject is held write-locked and
 * marked not-live until the PD is fully constructed and linked into the
 * context's pd_list, so concurrent lookups cannot observe a half-built
 * object.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Only now may lookups through the idr succeed. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
 573
/*
 * ib_uverbs_dealloc_pd() - handle the DEALLOC_PD command.
 *
 * Looks up the PD uobject write-locked, deallocates the PD, and on
 * success removes the uobject from the idr and the context's pd_list.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	/* If the driver refuses (e.g. PD still in use), keep the object live. */
	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
 608
/*
 * Node of the per-device rb-tree mapping an inode (the file userspace
 * opened to name a shared XRC domain) to its ib_xrcd, keyed by inode
 * pointer.  Protected by the device's xrcd_tree_mutex.
 */
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
 614
/*
 * Insert an inode -> xrcd mapping into the device's xrcd tree.
 * Takes an inode reference (igrab) that xrcd_table_delete() releases.
 * Caller must hold dev->xrcd_tree_mutex.
 *
 * Returns 0 on success, -EEXIST if the inode is already mapped,
 * -ENOMEM on allocation failure.
 *
 * NOTE(review): igrab() can return NULL for an inode being torn down;
 * the result is not checked here — presumably the caller's open file
 * reference keeps the inode alive.  Verify against callers.
 */
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	/* Standard rb-tree descent, ordered by inode pointer value. */
	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}
 649
 650static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
 651						  struct inode *inode)
 652{
 653	struct xrcd_table_entry *entry;
 654	struct rb_node *p = dev->xrcd_tree.rb_node;
 655
 656	while (p) {
 657		entry = rb_entry(p, struct xrcd_table_entry, node);
 658
 659		if (inode < entry->inode)
 660			p = p->rb_left;
 661		else if (inode > entry->inode)
 662			p = p->rb_right;
 663		else
 664			return entry;
 665	}
 666
 667	return NULL;
 668}
 669
 670static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
 671{
 672	struct xrcd_table_entry *entry;
 673
 674	entry = xrcd_table_search(dev, inode);
 675	if (!entry)
 676		return NULL;
 677
 678	return entry->xrcd;
 679}
 680
 681static void xrcd_table_delete(struct ib_uverbs_device *dev,
 682			      struct inode *inode)
 683{
 684	struct xrcd_table_entry *entry;
 685
 686	entry = xrcd_table_search(dev, inode);
 687	if (entry) {
 688		iput(inode);
 689		rb_erase(&entry->node, &dev->xrcd_tree);
 690		kfree(entry);
 691	}
 692}
 693
/*
 * ib_uverbs_open_xrcd() - handle the OPEN_XRCD command.
 *
 * Opens (or creates) an XRC domain.  If cmd.fd names an open file, that
 * file's inode is used as the cross-process key into the per-device
 * xrcd table: an existing XRCD for that inode is shared (subject to
 * O_CREAT/O_EXCL-style oflags semantics), otherwise a new one is
 * allocated.  cmd.fd == -1 requests an anonymous, non-shared XRCD.
 * xrcd->usecnt counts the sharers.
 *
 * The whole operation runs under the device's xrcd_tree_mutex.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof  resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		/* O_EXCL: fail if an XRCD already exists for this inode. */
		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		/* No existing XRCD for this inode (or anonymous): create one. */
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	/* refcnt counts objects (QPs/SRQs) created on top of this uobject;
	 * close is refused while it is non-zero. */
	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	/* Unwind in reverse order of construction. */
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
 830
/*
 * ib_uverbs_close_xrcd() - handle the CLOSE_XRCD command.
 *
 * Drops this process's reference to an XRC domain.  The underlying
 * XRCD is deallocated only when no other sharer remains (usecnt drops
 * to zero) or when it was anonymous (no backing inode).  Fails with
 * -EBUSY while QPs/SRQs created on this handle still exist
 * (obj->refcnt != 0).
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject           *uobj;
	struct ib_xrcd              *xrcd = NULL;
	struct inode                *inode = NULL;
	struct ib_uxrcd_object      *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	/* Last sharer (or anonymous XRCD): actually deallocate. */
	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	/* Snapshot "live" before unlocking; restore usecnt if dealloc failed. */
	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
 892
/*
 * ib_uverbs_dealloc_xrcd() - drop one sharer's reference to an XRCD
 * during cleanup (non-command path).  Deallocates the XRCD and removes
 * its inode mapping only when this was the last reference; an anonymous
 * XRCD (no inode) is deallocated unconditionally.
 *
 * Caller is expected to hold dev->xrcd_tree_mutex (the table helpers
 * require it) — TODO confirm against callers in uverbs_main.
 */
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
 907
/*
 * ib_uverbs_reg_mr() - handle the REG_MR command.
 *
 * Registers a user memory region on a PD and returns lkey/rkey plus an
 * idr handle.  Validates that start and hca_va share the same page
 * offset and that the access flags are a legal combination before
 * touching any resources.  The new uobject is held write-locked and
 * not-live until fully constructed.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* The user virtual address and the HCA virtual address must agree
	 * within the page, or the HW mapping cannot line up. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	/* Hold the PD read-locked across registration so it cannot be
	 * deallocated underneath us. */
	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	/* Only now may lookups through the idr succeed. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1004
/*
 * ib_uverbs_dereg_mr() - handle the DEREG_MR command.
 *
 * Looks up the MR uobject write-locked, deregisters the MR, and on
 * success removes the uobject from the idr and the context's mr_list.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	/* If deregistration fails the object stays live and usable. */
	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1042
/*
 * ib_uverbs_alloc_mw() - handle the ALLOC_MW command.
 *
 * Allocates a memory window of the requested type on a PD and returns
 * its rkey plus an idr handle.  Mirrors the alloc_pd/reg_mr pattern:
 * the new uobject is write-locked and not-live until fully constructed.
 *
 * Returns in_len on success, or a negative errno.
 */
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	/* Hold the PD read-locked across allocation so it cannot be
	 * deallocated underneath us. */
	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	/* Only now may lookups through the idr succeed. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1124
/*
 * DEALLOC_MW command: destroy the memory window identified by
 * cmd.mw_handle and remove it from the per-context bookkeeping.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject	   *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	/* Take the uobject write-locked so no other thread can use the MW. */
	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;	/* dead: concurrent lookups must now fail */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1162
/*
 * CREATE_COMP_CHANNEL command: create a completion event channel,
 * backed by an anonymous event file, and install it as a new file
 * descriptor in the calling process.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Reserve an fd number first; it is only bound at fd_install(). */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	/*
	 * Install the fd only after the response copy succeeded: once
	 * fd_install() runs, userspace owns the descriptor and this
	 * function could no longer unwind cleanly.
	 */
	fd_install(resp.fd, filp);
	return in_len;
}
1199
/*
 * CREATE_CQ command: create a completion queue with (at least) cmd.cqe
 * entries, optionally bound to the completion event channel given by
 * cmd.comp_channel, and return its handle and actual size to userspace.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Hold the new uobject write-locked until it is live and published. */
	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	/* A negative comp_channel means "no completion channel". */
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;	/* driver may round the size up */

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	/* Mark live only after the response has reached userspace. */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
1300
/*
 * RESIZE_CQ command: resize the CQ identified by cmd.cq_handle to hold
 * at least cmd.cqe entries, returning the actual new size.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/*
	 * Only resp.cqe is copied back, not the full struct - presumably
	 * to leave the rest of the user's response buffer untouched for
	 * ABI reasons.  NOTE(review): confirm against the userspace
	 * library before widening this copy.
	 */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
1337
1338static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1339{
1340	struct ib_uverbs_wc tmp;
1341
1342	tmp.wr_id		= wc->wr_id;
1343	tmp.status		= wc->status;
1344	tmp.opcode		= wc->opcode;
1345	tmp.vendor_err		= wc->vendor_err;
1346	tmp.byte_len		= wc->byte_len;
1347	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
1348	tmp.qp_num		= wc->qp->qp_num;
1349	tmp.src_qp		= wc->src_qp;
1350	tmp.wc_flags		= wc->wc_flags;
1351	tmp.pkey_index		= wc->pkey_index;
1352	tmp.slid		= wc->slid;
1353	tmp.sl			= wc->sl;
1354	tmp.dlid_path_bits	= wc->dlid_path_bits;
1355	tmp.port_num		= wc->port_num;
1356	tmp.reserved		= 0;
1357
1358	if (copy_to_user(dest, &tmp, sizeof tmp))
1359		return -EFAULT;
1360
1361	return 0;
1362}
1363
/*
 * POLL_CQ command: poll up to cmd.ne completions from the CQ given by
 * cmd.cq_handle, writing a response header followed by an array of
 * struct ib_uverbs_wc directly into the user response buffer.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	/* Poll one completion at a time so each can be copied out directly. */
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;	/* CQ drained before cmd.ne completions */

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	/* Write the header (with the final count) after the WC array. */
	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
1414
1415ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1416				const char __user *buf, int in_len,
1417				int out_len)
1418{
1419	struct ib_uverbs_req_notify_cq cmd;
1420	struct ib_cq                  *cq;
1421
1422	if (copy_from_user(&cmd, buf, sizeof cmd))
1423		return -EFAULT;
1424
1425	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1426	if (!cq)
1427		return -EINVAL;
1428
1429	ib_req_notify_cq(cq, cmd.solicited_only ?
1430			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1431
1432	put_cq_read(cq);
1433
1434	return in_len;
1435}
1436
/*
 * DESTROY_CQ command: destroy the CQ identified by cmd.cq_handle and
 * report to userspace how many completion and async events had been
 * delivered for it.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Take the uobject write-locked so no other thread can use the CQ. */
	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;	/* dead: concurrent lookups must now fail */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	/* Snapshot the event counts before dropping the last reference. */
	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1488
/*
 * CREATE_QP command: create a queue pair of cmd.qp_type.  For
 * IB_QPT_XRC_TGT, cmd.pd_handle actually names an XRCD and no PD/CQs
 * are used; for all other types a PD and send CQ (and optionally a
 * distinct receive CQ and/or an SRQ) are looked up and referenced.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device	       *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd		       *xrcd = NULL;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Raw packet QPs can sniff/inject traffic; require CAP_NET_RAW. */
	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Hold the new uobject write-locked until it is live and published. */
	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		/* pd_handle is overloaded to carry the XRCD handle here. */
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			/* XRC initiator QPs have no receive queue. */
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		/* !!rcq: nest the read lock only when two distinct CQs are held. */
		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;	/* same CQ for send and receive */
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	/* XRC_TGT goes through the core helper, which fills in the QP. */
	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		/* Raw driver call: fill in the core fields ourselves. */
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	/* Success: drop the temporary read references taken above. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1683
1684ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1685			  const char __user *buf, int in_len, int out_len)
1686{
1687	struct ib_uverbs_open_qp        cmd;
1688	struct ib_uverbs_create_qp_resp resp;
1689	struct ib_udata                 udata;
1690	struct ib_uqp_object           *obj;
1691	struct ib_xrcd		       *xrcd;
1692	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
1693	struct ib_qp                   *qp;
1694	struct ib_qp_open_attr          attr;
1695	int ret;
1696
1697	if (out_len < sizeof resp)
1698		return -ENOSPC;
1699
1700	if (copy_from_user(&cmd, buf, sizeof cmd))
1701		return -EFAULT;
1702
1703	INIT_UDATA(&udata, buf + sizeof cmd,
1704		   (unsigned long) cmd.response + sizeof resp,
1705		   in_len - sizeof cmd, out_len - sizeof resp);
1706
1707	obj = kmalloc(sizeof *obj, GFP_KERNEL);
1708	if (!obj)
1709		return -ENOMEM;
1710
1711	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1712	down_write(&obj->uevent.uobject.mutex);
1713
1714	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1715	if (!xrcd) {
1716		ret = -EINVAL;
1717		goto err_put;
1718	}
1719
1720	attr.event_handler = ib_uverbs_qp_event_handler;
1721	attr.qp_context    = file;
1722	attr.qp_num        = cmd.qpn;
1723	attr.qp_type       = cmd.qp_type;
1724
1725	obj->uevent.events_reported = 0;
1726	INIT_LIST_HEAD(&obj->uevent.event_list);
1727	INIT_LIST_HEAD(&obj->mcast_list);
1728
1729	qp = ib_open_qp(xrcd, &attr);
1730	if (IS_ERR(qp)) {
1731		ret = PTR_ERR(qp);
1732		goto err_put;
1733	}
1734
1735	qp->uobject = &obj->uevent.uobject;
1736
1737	obj->uevent.uobject.object = qp;
1738	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1739	if (ret)
1740		goto err_destroy;
1741
1742	memset(&resp, 0, sizeof resp);
1743	resp.qpn       = qp->qp_num;
1744	resp.qp_handle = obj->uevent.uobject.id;
1745
1746	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1747			 &resp, sizeof resp)) {
1748		ret = -EFAULT;
1749		goto err_remove;
1750	}
1751
1752	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1753	atomic_inc(&obj->uxrcd->refcnt);
1754	put_xrcd_read(xrcd_uobj);
1755
1756	mutex_lock(&file->mutex);
1757	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1758	mutex_unlock(&file->mutex);
1759
1760	obj->uevent.uobject.live = 1;
1761
1762	up_write(&obj->uevent.uobject.mutex);
1763
1764	return in_len;
1765
1766err_remove:
1767	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1768
1769err_destroy:
1770	ib_destroy_qp(qp);
1771
1772err_put:
1773	put_xrcd_read(xrcd_uobj);
1774	put_uobj_write(&obj->uevent.uobject);
1775	return ret;
1776}
1777
1778ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1779			   const char __user *buf, int in_len,
1780			   int out_len)
1781{
1782	struct ib_uverbs_query_qp      cmd;
1783	struct ib_uverbs_query_qp_resp resp;
1784	struct ib_qp                   *qp;
1785	struct ib_qp_attr              *attr;
1786	struct ib_qp_init_attr         *init_attr;
1787	int                            ret;
1788
1789	if (copy_from_user(&cmd, buf, sizeof cmd))
1790		return -EFAULT;
1791
1792	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1793	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1794	if (!attr || !init_attr) {
1795		ret = -ENOMEM;
1796		goto out;
1797	}
1798
1799	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1800	if (!qp) {
1801		ret = -EINVAL;
1802		goto out;
1803	}
1804
1805	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1806
1807	put_qp_read(qp);
1808
1809	if (ret)
1810		goto out;
1811
1812	memset(&resp, 0, sizeof resp);
1813
1814	resp.qp_state               = attr->qp_state;
1815	resp.cur_qp_state           = attr->cur_qp_state;
1816	resp.path_mtu               = attr->path_mtu;
1817	resp.path_mig_state         = attr->path_mig_state;
1818	resp.qkey                   = attr->qkey;
1819	resp.rq_psn                 = attr->rq_psn;
1820	resp.sq_psn                 = attr->sq_psn;
1821	resp.dest_qp_num            = attr->dest_qp_num;
1822	resp.qp_access_flags        = attr->qp_access_flags;
1823	resp.pkey_index             = attr->pkey_index;
1824	resp.alt_pkey_index         = attr->alt_pkey_index;
1825	resp.sq_draining            = attr->sq_draining;
1826	resp.max_rd_atomic          = attr->max_rd_atomic;
1827	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1828	resp.min_rnr_timer          = attr->min_rnr_timer;
1829	resp.port_num               = attr->port_num;
1830	resp.timeout                = attr->timeout;
1831	resp.retry_cnt              = attr->retry_cnt;
1832	resp.rnr_retry              = attr->rnr_retry;
1833	resp.alt_port_num           = attr->alt_port_num;
1834	resp.alt_timeout            = attr->alt_timeout;
1835
1836	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1837	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1838	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1839	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1840	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1841	resp.dest.dlid              = attr->ah_attr.dlid;
1842	resp.dest.sl                = attr->ah_attr.sl;
1843	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1844	resp.dest.static_rate       = attr->ah_attr.static_rate;
1845	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1846	resp.dest.port_num          = attr->ah_attr.port_num;
1847
1848	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1849	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1850	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1851	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1852	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1853	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1854	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1855	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1856	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1857	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1858	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1859
1860	resp.max_send_wr            = init_attr->cap.max_send_wr;
1861	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1862	resp.max_send_sge           = init_attr->cap.max_send_sge;
1863	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1864	resp.max_inline_data        = init_attr->cap.max_inline_data;
1865	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1866
1867	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1868			 &resp, sizeof resp))
1869		ret = -EFAULT;
1870
1871out:
1872	kfree(attr);
1873	kfree(init_attr);
1874
1875	return ret ? ret : in_len;
1876}
1877
1878/* Remove ignored fields set in the attribute mask */
1879static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1880{
1881	switch (qp_type) {
1882	case IB_QPT_XRC_INI:
1883		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1884	case IB_QPT_XRC_TGT:
1885		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1886				IB_QP_RNR_RETRY);
1887	default:
1888		return mask;
1889	}
1890}
1891
1892ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1893			    const char __user *buf, int in_len,
1894			    int out_len)
1895{
1896	struct ib_uverbs_modify_qp cmd;
1897	struct ib_udata            udata;
1898	struct ib_qp              *qp;
1899	struct ib_qp_attr         *attr;
1900	int                        ret;
1901
1902	if (copy_from_user(&cmd, buf, sizeof cmd))
1903		return -EFAULT;
1904
1905	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1906		   out_len);
1907
1908	attr = kmalloc(sizeof *attr, GFP_KERNEL);
1909	if (!attr)
1910		return -ENOMEM;
1911
1912	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1913	if (!qp) {
1914		ret = -EINVAL;
1915		goto out;
1916	}
1917
1918	attr->qp_state 		  = cmd.qp_state;
1919	attr->cur_qp_state 	  = cmd.cur_qp_state;
1920	attr->path_mtu 		  = cmd.path_mtu;
1921	attr->path_mig_state 	  = cmd.path_mig_state;
1922	attr->qkey 		  = cmd.qkey;
1923	attr->rq_psn 		  = cmd.rq_psn;
1924	attr->sq_psn 		  = cmd.sq_psn;
1925	attr->dest_qp_num 	  = cmd.dest_qp_num;
1926	attr->qp_access_flags 	  = cmd.qp_access_flags;
1927	attr->pkey_index 	  = cmd.pkey_index;
1928	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
1929	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1930	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
1931	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
1932	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
1933	attr->port_num 		  = cmd.port_num;
1934	attr->timeout 		  = cmd.timeout;
1935	attr->retry_cnt 	  = cmd.retry_cnt;
1936	attr->rnr_retry 	  = cmd.rnr_retry;
1937	attr->alt_port_num 	  = cmd.alt_port_num;
1938	attr->alt_timeout 	  = cmd.alt_timeout;
1939
1940	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1941	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
1942	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
1943	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
1944	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
1945	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
1946	attr->ah_attr.sl   	    	    = cmd.dest.sl;
1947	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
1948	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
1949	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
1950	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;
1951
1952	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1953	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
1954	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
1955	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
1956	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1957	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
1958	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
1959	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
1960	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
1961	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1962	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;
1963
1964	if (qp->real_qp == qp) {
1965		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
1966		if (ret)
1967			goto out;
1968		ret = qp->device->modify_qp(qp, attr,
1969			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
1970	} else {
1971		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
1972	}
1973
1974	put_qp_read(qp);
1975
1976	if (ret)
1977		goto out;
1978
1979	ret = in_len;
1980
1981out:
1982	kfree(attr);
1983
1984	return ret;
1985}
1986
/*
 * DESTROY_QP command: destroy the QP identified by cmd.qp_handle
 * (refused while multicast attachments remain) and report how many
 * async events were delivered for it.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp               	*qp;
	struct ib_uqp_object        	*obj;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* Take the uobject write-locked so no other thread can use the QP. */
	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* Userspace must detach all multicast groups before destroying. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;	/* dead: concurrent lookups must now fail */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* An XRC target QP held a reference on its XRCD's user object. */
	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	/* Snapshot the event count before dropping the last reference. */
	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
2044
/*
 * POST_SEND command: unmarshal a chain of cmd.wr_count work requests
 * (each cmd.wqe_size bytes, followed in the input buffer by all their
 * scatter/gather entries) into kernel ib_send_wr structures and post
 * them to the QP.  On a posting error, resp.bad_wr reports the 1-based
 * index of the WR that failed.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The input buffer must hold all WRs plus all their SGEs. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer reused for each user WR as it is unmarshalled. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* SGEs are consumed in order; never beyond cmd.sge_count. */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* One allocation: the WR with its SG list appended (aligned). */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* UD WRs carry an AH handle that must be resolved. */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fall through - IMM write also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			/* SG list lives in the tail of the WR allocation. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the chain, dropping any AH references taken for UD WRs. */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
2203
2204static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2205						    int in_len,
2206						    u32 wr_count,
2207						    u32 sge_count,
2208						    u32 wqe_size)
2209{
2210	struct ib_uverbs_recv_wr *user_wr;
2211	struct ib_recv_wr        *wr = NULL, *last, *next;
2212	int                       sg_ind;
2213	int                       i;
2214	int                       ret;
2215
2216	if (in_len < wqe_size * wr_count +
2217	    sge_count * sizeof (struct ib_uverbs_sge))
2218		return ERR_PTR(-EINVAL);
2219
2220	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2221		return ERR_PTR(-EINVAL);
2222
2223	user_wr = kmalloc(wqe_size, GFP_KERNEL);
2224	if (!user_wr)
2225		return ERR_PTR(-ENOMEM);
2226
2227	sg_ind = 0;
2228	last = NULL;
2229	for (i = 0; i < wr_count; ++i) {
2230		if (copy_from_user(user_wr, buf + i * wqe_size,
2231				   wqe_size)) {
2232			ret = -EFAULT;
2233			goto err;
2234		}
2235
2236		if (user_wr->num_sge + sg_ind > sge_count) {
2237			ret = -EINVAL;
2238			goto err;
2239		}
2240
2241		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2242			       user_wr->num_sge * sizeof (struct ib_sge),
2243			       GFP_KERNEL);
2244		if (!next) {
2245			ret = -ENOMEM;
2246			goto err;
2247		}
2248
2249		if (!last)
2250			wr = next;
2251		else
2252			last->next = next;
2253		last = next;
2254
2255		next->next       = NULL;
2256		next->wr_id      = user_wr->wr_id;
2257		next->num_sge    = user_wr->num_sge;
2258
2259		if (next->num_sge) {
2260			next->sg_list = (void *) next +
2261				ALIGN(sizeof *next, sizeof (struct ib_sge));
2262			if (copy_from_user(next->sg_list,
2263					   buf + wr_count * wqe_size +
2264					   sg_ind * sizeof (struct ib_sge),
2265					   next->num_sge * sizeof (struct ib_sge))) {
2266				ret = -EFAULT;
2267				goto err;
2268			}
2269			sg_ind += next->num_sge;
2270		} else
2271			next->sg_list = NULL;
2272	}
2273
2274	kfree(user_wr);
2275	return wr;
2276
2277err:
2278	kfree(user_wr);
2279
2280	while (wr) {
2281		next = wr->next;
2282		kfree(wr);
2283		wr = next;
2284	}
2285
2286	return ERR_PTR(ret);
2287}
2288
2289ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2290			    const char __user *buf, int in_len,
2291			    int out_len)
2292{
2293	struct ib_uverbs_post_recv      cmd;
2294	struct ib_uverbs_post_recv_resp resp;
2295	struct ib_recv_wr              *wr, *next, *bad_wr;
2296	struct ib_qp                   *qp;
2297	ssize_t                         ret = -EINVAL;
2298
2299	if (copy_from_user(&cmd, buf, sizeof cmd))
2300		return -EFAULT;
2301
2302	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2303				       in_len - sizeof cmd, cmd.wr_count,
2304				       cmd.sge_count, cmd.wqe_size);
2305	if (IS_ERR(wr))
2306		return PTR_ERR(wr);
2307
2308	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2309	if (!qp)
2310		goto out;
2311
2312	resp.bad_wr = 0;
2313	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2314
2315	put_qp_read(qp);
2316
2317	if (ret)
2318		for (next = wr; next; next = next->next) {
2319			++resp.bad_wr;
2320			if (next == bad_wr)
2321				break;
2322		}
2323
2324	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2325			 &resp, sizeof resp))
2326		ret = -EFAULT;
2327
2328out:
2329	while (wr) {
2330		next = wr->next;
2331		kfree(wr);
2332		wr = next;
2333	}
2334
2335	return ret ? ret : in_len;
2336}
2337
2338ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2339				const char __user *buf, int in_len,
2340				int out_len)
2341{
2342	struct ib_uverbs_post_srq_recv      cmd;
2343	struct ib_uverbs_post_srq_recv_resp resp;
2344	struct ib_recv_wr                  *wr, *next, *bad_wr;
2345	struct ib_srq                      *srq;
2346	ssize_t                             ret = -EINVAL;
2347
2348	if (copy_from_user(&cmd, buf, sizeof cmd))
2349		return -EFAULT;
2350
2351	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2352				       in_len - sizeof cmd, cmd.wr_count,
2353				       cmd.sge_count, cmd.wqe_size);
2354	if (IS_ERR(wr))
2355		return PTR_ERR(wr);
2356
2357	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2358	if (!srq)
2359		goto out;
2360
2361	resp.bad_wr = 0;
2362	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2363
2364	put_srq_read(srq);
2365
2366	if (ret)
2367		for (next = wr; next; next = next->next) {
2368			++resp.bad_wr;
2369			if (next == bad_wr)
2370				break;
2371		}
2372
2373	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2374			 &resp, sizeof resp))
2375		ret = -EFAULT;
2376
2377out:
2378	while (wr) {
2379		next = wr->next;
2380		kfree(wr);
2381		wr = next;
2382	}
2383
2384	return ret ? ret : in_len;
2385}
2386
/*
 * ib_uverbs_create_ah() - handle the CREATE_AH uverbs command.
 *
 * Builds an ib_ah_attr from the user-supplied attributes, creates an
 * address handle on the given PD and returns its idr handle to
 * userspace.  On success the new uobject is published on the
 * ucontext's ah_list; on failure every acquired resource is unwound
 * in reverse order via the error labels.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* Hold the uobject write-locked until it is fully set up. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	/* Translate the userspace AH attributes to the kernel form. */
	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	/* Allocate the handle userspace will use to refer to this AH. */
	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	/* Publish the uobject and mark it usable by other commands. */
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
2476
2477ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2478			     const char __user *buf, int in_len, int out_len)
2479{
2480	struct ib_uverbs_destroy_ah cmd;
2481	struct ib_ah		   *ah;
2482	struct ib_uobject	   *uobj;
2483	int			    ret;
2484
2485	if (copy_from_user(&cmd, buf, sizeof cmd))
2486		return -EFAULT;
2487
2488	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2489	if (!uobj)
2490		return -EINVAL;
2491	ah = uobj->object;
2492
2493	ret = ib_destroy_ah(ah);
2494	if (!ret)
2495		uobj->live = 0;
2496
2497	put_uobj_write(uobj);
2498
2499	if (ret)
2500		return ret;
2501
2502	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2503
2504	mutex_lock(&file->mutex);
2505	list_del(&uobj->list);
2506	mutex_unlock(&file->mutex);
2507
2508	put_uobj(uobj);
2509
2510	return in_len;
2511}
2512
2513ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2514			       const char __user *buf, int in_len,
2515			       int out_len)
2516{
2517	struct ib_uverbs_attach_mcast cmd;
2518	struct ib_qp                 *qp;
2519	struct ib_uqp_object         *obj;
2520	struct ib_uverbs_mcast_entry *mcast;
2521	int                           ret;
2522
2523	if (copy_from_user(&cmd, buf, sizeof cmd))
2524		return -EFAULT;
2525
2526	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2527	if (!qp)
2528		return -EINVAL;
2529
2530	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2531
2532	list_for_each_entry(mcast, &obj->mcast_list, list)
2533		if (cmd.mlid == mcast->lid &&
2534		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2535			ret = 0;
2536			goto out_put;
2537		}
2538
2539	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2540	if (!mcast) {
2541		ret = -ENOMEM;
2542		goto out_put;
2543	}
2544
2545	mcast->lid = cmd.mlid;
2546	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2547
2548	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2549	if (!ret)
2550		list_add_tail(&mcast->list, &obj->mcast_list);
2551	else
2552		kfree(mcast);
2553
2554out_put:
2555	put_qp_write(qp);
2556
2557	return ret ? ret : in_len;
2558}
2559
2560ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2561			       const char __user *buf, int in_len,
2562			       int out_len)
2563{
2564	struct ib_uverbs_detach_mcast cmd;
2565	struct ib_uqp_object         *obj;
2566	struct ib_qp                 *qp;
2567	struct ib_uverbs_mcast_entry *mcast;
2568	int                           ret = -EINVAL;
2569
2570	if (copy_from_user(&cmd, buf, sizeof cmd))
2571		return -EFAULT;
2572
2573	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2574	if (!qp)
2575		return -EINVAL;
2576
2577	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2578	if (ret)
2579		goto out_put;
2580
2581	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2582
2583	list_for_each_entry(mcast, &obj->mcast_list, list)
2584		if (cmd.mlid == mcast->lid &&
2585		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2586			list_del(&mcast->list);
2587			kfree(mcast);
2588			break;
2589		}
2590
2591out_put:
2592	put_qp_write(qp);
2593
2594	return ret ? ret : in_len;
2595}
2596
/*
 * Convert one userspace flow spec into the kernel's union ib_flow_spec
 * representation.  The size userspace reports for a spec must exactly
 * match the kernel's size for that spec type, otherwise the val/mask
 * layouts disagree and we reject with -EINVAL.
 */
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	/* Reserved field must be zero per the ABI. */
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		/* TCP and UDP share one spec layout. */
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2639
/*
 * ib_uverbs_ex_create_flow() - handle the extended CREATE_FLOW command.
 *
 * Copies a variable-size flow attribute (header plus num_of_specs
 * individual specs) from userspace, converts each spec to the kernel
 * representation, attaches the resulting flow rule to the QP and
 * returns its idr handle.  Returns 0 on success or a negative errno.
 */
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* Advance past the fixed header; the specs follow it. */
	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	/* Sniffer rules need NET_ADMIN; all flow rules need NET_RAW. */
	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	/* The declared spec payload must fit both inlen and spec count. */
	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		/* Header already in cmd; append the raw specs after it. */
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	/* Hold the uobject write-locked until it is fully set up. */
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/*
	 * Walk the variable-size spec array.  Each iteration consumes
	 * one user spec (kern_spec) and emits one kernel spec (ib_spec);
	 * cmd.flow_attr.size counts down so that after the loop it must
	 * be exactly zero - anything else means userspace lied about
	 * the sizes.
	 */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	/* Allocate the handle userspace will use for this rule. */
	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	/* Publish the uobject and mark it usable by other commands. */
	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	/* kern_flow_attr was only allocated when specs were present. */
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
2799
/*
 * ib_uverbs_ex_destroy_flow() - handle the extended DESTROY_FLOW
 * command.  Destroys the flow rule and releases its user object,
 * returning 0 or a negative errno.
 */
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	/* Take the uobject write-locked so no one can race the destroy. */
	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/*
	 * NOTE(review): unlike ib_uverbs_destroy_ah(), the handle is
	 * removed and the uobject freed even when ib_destroy_flow()
	 * failed (only the error code is propagated) - confirm this
	 * asymmetry is intentional.
	 */
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
2841
/*
 * Common implementation behind the CREATE_SRQ and CREATE_XSRQ
 * commands.  Builds an ib_srq_init_attr from *cmd, creates the SRQ on
 * the PD (for IB_SRQT_XRC additionally bound to an XRC domain and CQ),
 * registers it in the SRQ idr and copies the response to userspace.
 * Returns 0 or a negative errno; the error labels unwind the acquired
 * resources in reverse order and fall through into one another.
 */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Hold the uobject write-locked until it is fully set up. */
	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		/* XRC SRQs additionally need an XRC domain and a CQ. */
		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		/* Pin the XRCD's user object for the SRQ's lifetime. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the generic SRQ fields the driver does not set. */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	/* Allocate the handle userspace will use for this SRQ. */
	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Drop the lookup references taken above; the SRQ holds its own. */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	/* Publish the uobject and mark it usable by other commands. */
	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
2974
2975ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
2976			     const char __user *buf, int in_len,
2977			     int out_len)
2978{
2979	struct ib_uverbs_create_srq      cmd;
2980	struct ib_uverbs_create_xsrq     xcmd;
2981	struct ib_uverbs_create_srq_resp resp;
2982	struct ib_udata                  udata;
2983	int ret;
2984
2985	if (out_len < sizeof resp)
2986		return -ENOSPC;
2987
2988	if (copy_from_user(&cmd, buf, sizeof cmd))
2989		return -EFAULT;
2990
2991	xcmd.response	 = cmd.response;
2992	xcmd.user_handle = cmd.user_handle;
2993	xcmd.srq_type	 = IB_SRQT_BASIC;
2994	xcmd.pd_handle	 = cmd.pd_handle;
2995	xcmd.max_wr	 = cmd.max_wr;
2996	xcmd.max_sge	 = cmd.max_sge;
2997	xcmd.srq_limit	 = cmd.srq_limit;
2998
2999	INIT_UDATA(&udata, buf + sizeof cmd,
3000		   (unsigned long) cmd.response + sizeof resp,
3001		   in_len - sizeof cmd, out_len - sizeof resp);
3002
3003	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
3004	if (ret)
3005		return ret;
3006
3007	return in_len;
3008}
3009
3010ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3011			      const char __user *buf, int in_len, int out_len)
3012{
3013	struct ib_uverbs_create_xsrq     cmd;
3014	struct ib_uverbs_create_srq_resp resp;
3015	struct ib_udata                  udata;
3016	int ret;
3017
3018	if (out_len < sizeof resp)
3019		return -ENOSPC;
3020
3021	if (copy_from_user(&cmd, buf, sizeof cmd))
3022		return -EFAULT;
3023
3024	INIT_UDATA(&udata, buf + sizeof cmd,
3025		   (unsigned long) cmd.response + sizeof resp,
3026		   in_len - sizeof cmd, out_len - sizeof resp);
3027
3028	ret = __uverbs_create_xsrq(file, &cmd, &udata);
3029	if (ret)
3030		return ret;
3031
3032	return in_len;
3033}
3034
3035ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3036			     const char __user *buf, int in_len,
3037			     int out_len)
3038{
3039	struct ib_uverbs_modify_srq cmd;
3040	struct ib_udata             udata;
3041	struct ib_srq              *srq;
3042	struct ib_srq_attr          attr;
3043	int                         ret;
3044
3045	if (copy_from_user(&cmd, buf, sizeof cmd))
3046		return -EFAULT;
3047
3048	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3049		   out_len);
3050
3051	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3052	if (!srq)
3053		return -EINVAL;
3054
3055	attr.max_wr    = cmd.max_wr;
3056	attr.srq_limit = cmd.srq_limit;
3057
3058	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3059
3060	put_srq_read(srq);
3061
3062	return ret ? ret : in_len;
3063}
3064
3065ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3066			    const char __user *buf,
3067			    int in_len, int out_len)
3068{
3069	struct ib_uverbs_query_srq      cmd;
3070	struct ib_uverbs_query_srq_resp resp;
3071	struct ib_srq_attr              attr;
3072	struct ib_srq                   *srq;
3073	int                             ret;
3074
3075	if (out_len < sizeof resp)
3076		return -ENOSPC;
3077
3078	if (copy_from_user(&cmd, buf, sizeof cmd))
3079		return -EFAULT;
3080
3081	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3082	if (!srq)
3083		return -EINVAL;
3084
3085	ret = ib_query_srq(srq, &attr);
3086
3087	put_srq_read(srq);
3088
3089	if (ret)
3090		return ret;
3091
3092	memset(&resp, 0, sizeof resp);
3093
3094	resp.max_wr    = attr.max_wr;
3095	resp.max_sge   = attr.max_sge;
3096	resp.srq_limit = attr.srq_limit;
3097
3098	if (copy_to_user((void __user *) (unsigned long) cmd.response,
3099			 &resp, sizeof resp))
3100		return -EFAULT;
3101
3102	return in_len;
3103}
3104
/*
 * ib_uverbs_destroy_srq() - handle the DESTROY_SRQ uverbs command.
 * Destroys the SRQ, drops the XRC domain reference for XRC SRQs,
 * unlinks the user object and reports the number of async events
 * delivered for this SRQ back to userspace.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Take the uobject write-locked so no one can race the destroy. */
	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/* Cache the type: srq is freed by a successful ib_destroy_srq(). */
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* XRC SRQs pinned their XRC domain at create time; release it. */
	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	/* Read events_reported before the final put frees the object. */
	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}