Linux Audio

Check our new training course

Loading...
v3.1
   1/*
   2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   4 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
   5 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/file.h>
  37#include <linux/fs.h>
  38#include <linux/slab.h>
 
  39
  40#include <asm/uaccess.h>
  41
  42#include "uverbs.h"
 
  43
  44static struct lock_class_key pd_lock_key;
  45static struct lock_class_key mr_lock_key;
  46static struct lock_class_key cq_lock_key;
  47static struct lock_class_key qp_lock_key;
  48static struct lock_class_key ah_lock_key;
  49static struct lock_class_key srq_lock_key;
  50
  51#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
  52	do {								\
  53		(udata)->inbuf  = (void __user *) (ibuf);		\
  54		(udata)->outbuf = (void __user *) (obuf);		\
  55		(udata)->inlen  = (ilen);				\
  56		(udata)->outlen = (olen);				\
  57	} while (0)
  58
  59/*
  60 * The ib_uobject locking scheme is as follows:
  61 *
  62 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
  63 *   needs to be held during all idr operations.  When an object is
  64 *   looked up, a reference must be taken on the object's kref before
  65 *   dropping this lock.
 
 
  66 *
  67 * - Each object also has an rwsem.  This rwsem must be held for
  68 *   reading while an operation that uses the object is performed.
  69 *   For example, while registering an MR, the associated PD's
  70 *   uobject.mutex must be held for reading.  The rwsem must be held
  71 *   for writing while initializing or destroying an object.
  72 *
  73 * - In addition, each object has a "live" flag.  If this flag is not
  74 *   set, then lookups of the object will fail even if it is found in
  75 *   the idr.  This handles a reader that blocks and does not acquire
  76 *   the rwsem until after the object is destroyed.  The destroy
  77 *   operation will set the live flag to 0 and then drop the rwsem;
  78 *   this will allow the reader to acquire the rwsem, see that the
  79 *   live flag is 0, and then drop the rwsem and its reference to
  80 *   object.  The underlying storage will not be freed until the last
  81 *   reference to the object is dropped.
  82 */
  83
/*
 * Initialize a freshly allocated ib_uobject: record the caller's opaque
 * user handle and owning ucontext, set its refcount to 1, and set up the
 * per-object rwsem with the supplied lockdep class key (one key per
 * object type, see the *_lock_key definitions above).  The object starts
 * with live = 0, so idr lookups will not hand it out until creation is
 * complete and the creator sets live = 1.
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct lock_class_key *key)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class(&uobj->mutex, key);
	uobj->live        = 0;		/* not visible to lookups yet */
}
  94
/* kref release callback: free the ib_uobject embedding the kref. */
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}
  99
/* Drop one reference on @uobj; the storage is freed on the last put. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
 104
/*
 * Release a uobject obtained via idr_read_uobj()/idr_read_obj():
 * drop the read lock, then drop the lookup reference.
 */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
 110
/*
 * Release a uobject obtained via idr_write_uobj() (or held for write
 * during creation): drop the write lock, then drop the reference.
 */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
 116
/*
 * Insert @uobj into @idr and store the allocated id in uobj->id.
 *
 * Uses the legacy two-step idr API: idr_pre_get() preallocates outside
 * the spinlock (GFP_KERNEL may sleep), then idr_get_new() consumes the
 * preallocation under ib_uverbs_idr_lock.  -EAGAIN means a concurrent
 * inserter consumed our preallocated node, so retry from the top.
 *
 * Returns 0 on success or a negative errno.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_get_new(idr, uobj, &uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);

	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
 134
/*
 * Remove @uobj from @idr under ib_uverbs_idr_lock.  Does not drop the
 * object's reference; callers follow up with put_uobj() separately.
 */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
 141
/*
 * Core lookup helper: find the uobject with @id in @idr and, if it
 * belongs to @context, take a kref on it.  Per the locking scheme
 * described above, the reference must be taken while still holding
 * ib_uverbs_idr_lock so the object cannot be freed between lookup
 * and kref_get.
 *
 * Returns the referenced uobject, or NULL if not found or if the
 * object belongs to a different ucontext (handle from another process).
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}
 159
/*
 * Look up a uobject and acquire its rwsem for reading.  @nested selects
 * down_read_nested() for the case where a second lock of the same
 * lockdep class may already be held (used for the recv CQ in create_qp
 * below, where the send CQ lock is already taken).
 *
 * An object whose live flag is clear is being (or has been) destroyed,
 * so it is treated as not found.  On success the caller must release
 * with put_uobj_read().
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		/* lost the race with destroy; drop lock and reference */
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
 180
/*
 * Look up a uobject and acquire its rwsem for writing (used on destroy
 * paths).  As with idr_read_uobj(), a dead object (live == 0) counts as
 * not found.  On success the caller must release with put_uobj_write().
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		/* already destroyed by a concurrent caller */
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
 198
 199static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
 200			  int nested)
 201{
 202	struct ib_uobject *uobj;
 203
 204	uobj = idr_read_uobj(idr, id, context, nested);
 205	return uobj ? uobj->object : NULL;
 206}
 207
/* Look up a PD by handle, read-locked; release with put_pd_read(). */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
 212
/* Release a PD obtained via idr_read_pd(). */
static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}
 217
/* Look up a CQ by handle, read-locked; @nested as for idr_read_uobj(). */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
 222
/* Release a CQ obtained via idr_read_cq(). */
static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}
 227
/* Look up an AH by handle, read-locked; release with put_ah_read(). */
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
 232
/* Release an AH obtained via idr_read_ah(). */
static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}
 237
/* Look up a QP by handle, read-locked; release with put_qp_read(). */
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
 242
 
 
 
 
 
 
 
 
/* Release a QP obtained via idr_read_qp(). */
static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}
 247
 
 
 
 
 
/* Look up an SRQ by handle, read-locked; release with put_srq_read(). */
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
 252
/* Release an SRQ obtained via idr_read_srq(). */
static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
 257
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * GET_CONTEXT command: create the per-process ucontext for this uverbs
 * file, together with an async event file whose fd is returned to
 * userspace in the response.  Only one ucontext may exist per open
 * file; a second call fails with -EINVAL.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* file->mutex serializes against concurrent GET_CONTEXT calls */
	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;		/* context already created */
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	/* Per-context lists of every uobject type owned by this process */
	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	/* Reserve an fd now; it is only installed after everything else
	 * has succeeded, so error paths never expose a half-built file. */
	ret = get_unused_fd();
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	/* file->async_file and ucontext->... hold references on the
	 * event file and on the uverbs file itself */
	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	/* Point of no return: fd becomes visible to userspace */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
 353
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * QUERY_DEVICE command: query the HCA's device attributes and copy
 * them, field by field, into the fixed userspace ABI response struct.
 * The memset keeps any padding/reserved response bytes zeroed.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver 		       = attr.fw_ver;
	resp.node_guid 		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid 	       = attr.sys_image_guid;
	resp.max_mr_size 	       = attr.max_mr_size;
	resp.page_size_cap 	       = attr.page_size_cap;
	resp.vendor_id 		       = attr.vendor_id;
	resp.vendor_part_id 	       = attr.vendor_part_id;
	resp.hw_ver 		       = attr.hw_ver;
	resp.max_qp 		       = attr.max_qp;
	resp.max_qp_wr 		       = attr.max_qp_wr;
	resp.device_cap_flags 	       = attr.device_cap_flags;
	resp.max_sge 		       = attr.max_sge;
	resp.max_sge_rd 	       = attr.max_sge_rd;
	resp.max_cq 		       = attr.max_cq;
	resp.max_cqe 		       = attr.max_cqe;
	resp.max_mr 		       = attr.max_mr;
	resp.max_pd 		       = attr.max_pd;
	resp.max_qp_rd_atom 	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom 	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom 	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap 	       = attr.atomic_cap;
	resp.max_ee 		       = attr.max_ee;
	resp.max_rdd 		       = attr.max_rdd;
	resp.max_mw 		       = attr.max_mw;
	resp.max_raw_ipv6_qp 	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp 	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp 	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah 		       = attr.max_ah;
	resp.max_fmr 		       = attr.max_fmr;
	resp.max_map_per_fmr 	       = attr.max_map_per_fmr;
	resp.max_srq 		       = attr.max_srq;
	resp.max_srq_wr 	       = attr.max_srq_wr;
	resp.max_srq_sge 	       = attr.max_srq_sge;
	resp.max_pkeys 		       = attr.max_pkeys;
	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 422
/*
 * QUERY_PORT command: query the attributes of one HCA port
 * (cmd.port_num) and translate them into the userspace ABI response.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	/* link layer (IB vs. Ethernet) comes from the core, not attr */
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 472
/*
 * ALLOC_PD command: create a protection domain and return its handle.
 *
 * Follows the standard uverbs object-creation protocol: allocate the
 * uobject, hold its rwsem for writing throughout creation, call into
 * the driver, insert into the idr, copy the handle to userspace, link
 * the uobject into the per-context list, and only then set live = 1 so
 * lookups can find it.  Each error label unwinds exactly the steps
 * completed before it.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Creation complete: make the object visible to idr lookups */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
 546
/*
 * DEALLOC_PD command: destroy a protection domain by handle.
 *
 * Standard destroy protocol: take the uobject write lock via
 * idr_write_uobj(), call the verbs destroy, and clear the live flag
 * only if the destroy succeeded (ib_dealloc_pd fails if the PD is
 * still in use).  The lock is dropped before the idr/list removal;
 * the cleared live flag keeps racing readers out in the meantime.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Drop the creation reference; frees the uobject storage */
	put_uobj(uobj);

	return in_len;
}
 581
/*
 * REG_MR command: register a userspace memory region on a PD and
 * return its handle plus lkey/rkey.
 *
 * Validates that start and hca_va share the same offset within a page,
 * and that local write permission accompanies any remote write/atomic
 * permission.  The PD is held read-locked across the driver call so it
 * cannot be destroyed while the MR is being created.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* Virtual and HCA addresses must land at the same page offset */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);	/* MR pins its PD */
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	/* Creation complete: make the MR visible to lookups */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
 682
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * DEREG_MR command: deregister a memory region by handle, following
 * the same destroy protocol as ib_uverbs_dealloc_pd(): write-lock the
 * uobject, destroy, clear live on success, then remove from the idr
 * and the per-context list and drop the final reference.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
 720
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * CREATE_COMP_CHANNEL command: create a completion event file and
 * return its fd.  The fd is reserved first and only installed after
 * the response has been copied to userspace, so the error paths can
 * still safely undo everything (put_unused_fd/fput).
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd();
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	/* Response delivered; now expose the fd to userspace */
	fd_install(resp.fd, filp);
	return in_len;
}
 757
/*
 * CREATE_CQ command: create a completion queue, optionally bound to a
 * completion event channel (cmd.comp_channel >= 0), and return its
 * handle and actual size.
 *
 * Follows the standard creation protocol (uobject write-locked until
 * live is set).  Uses ib_ucq_object rather than a bare ib_uobject so
 * per-CQ event bookkeeping (comp/async lists and counters) travels
 * with the uobject.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
	down_write(&obj->uobject.mutex);

	/* comp_channel < 0 means no completion channel for this CQ */
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;	/* driver may round cqe up */

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	/* Creation complete: make the CQ visible to lookups */
	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
 858
/*
 * RESIZE_CQ command: ask the driver to resize an existing CQ and
 * report the resulting size back to userspace.
 *
 * NOTE(review): the copy_to_user() deliberately-looking uses
 * "sizeof resp.cqe" — only the cqe field is written back, not the
 * whole resp struct.  Presumably this avoids clobbering bytes past
 * resp.cqe in the user buffer; confirm against the ABI before changing.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
 895
 896static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
 897{
 898	struct ib_uverbs_wc tmp;
 899
 900	tmp.wr_id		= wc->wr_id;
 901	tmp.status		= wc->status;
 902	tmp.opcode		= wc->opcode;
 903	tmp.vendor_err		= wc->vendor_err;
 904	tmp.byte_len		= wc->byte_len;
 905	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
 906	tmp.qp_num		= wc->qp->qp_num;
 907	tmp.src_qp		= wc->src_qp;
 908	tmp.wc_flags		= wc->wc_flags;
 909	tmp.pkey_index		= wc->pkey_index;
 910	tmp.slid		= wc->slid;
 911	tmp.sl			= wc->sl;
 912	tmp.dlid_path_bits	= wc->dlid_path_bits;
 913	tmp.port_num		= wc->port_num;
 914	tmp.reserved		= 0;
 915
 916	if (copy_to_user(dest, &tmp, sizeof tmp))
 917		return -EFAULT;
 918
 919	return 0;
 920}
 921
/*
 * POLL_CQ command: poll up to cmd.ne completions from a CQ, writing
 * a struct ib_uverbs_poll_cq_resp header followed by an array of
 * struct ib_uverbs_wc into the user response buffer.  Completions are
 * pulled one at a time so each can be translated and copied out as
 * it arrives.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;		/* CQ drained */

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	/* header (completion count) is written last, after the array */
	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
 972
 973ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
 
 974				const char __user *buf, int in_len,
 975				int out_len)
 976{
 977	struct ib_uverbs_req_notify_cq cmd;
 978	struct ib_cq                  *cq;
 979
 980	if (copy_from_user(&cmd, buf, sizeof cmd))
 981		return -EFAULT;
 982
 983	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
 984	if (!cq)
 985		return -EINVAL;
 986
 987	ib_req_notify_cq(cq, cmd.solicited_only ?
 988			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
 989
 990	put_cq_read(cq);
 991
 992	return in_len;
 993}
 994
/*
 * DESTROY_CQ command: destroy a CQ by handle and report back how many
 * completion and async events had been delivered for it (so userspace
 * can reconcile its event accounting).
 *
 * Standard destroy protocol: write-lock, destroy, clear live, unlock,
 * then remove from idr/list.  The event-file cleanup and the reads of
 * the event counters happen after the object is unreachable, and the
 * counters are read before the final put_uobj() frees the storage.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1046
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Create a queue pair on behalf of userspace.
 *
 * Looks up the PD, send/receive CQs and (optionally) SRQ referenced by
 * the command, builds an ib_qp_init_attr from the user-supplied
 * capabilities, calls the device's create_qp method, wires up the QP's
 * back-pointers and reference counts, and installs the new uobject in
 * the QP idr and the per-file qp_list.
 *
 * The new uobject is created write-locked and only marked live (and
 * unlocked) once the response has been copied out, so a concurrent
 * destroy cannot observe a half-initialized QP.
 *
 * Returns in_len on success or a negative errno; on any failure every
 * read reference taken on the PD/CQs/SRQ is dropped and the uobject is
 * released.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_pd                   *pd;
	struct ib_cq                   *scq, *rcq;
	struct ib_srq                  *srq;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private command/response tail follows the fixed structs. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	/* Take read references on every object the QP will point at. */
	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);

	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	/* Fill in the generic QP fields the core layer owns. */
	qp->device     	  = pd->device;
	qp->pd         	  = pd;
	qp->send_cq    	  = attr.send_cq;
	qp->recv_cq    	  = attr.recv_cq;
	qp->srq	       	  = attr.srq;
	qp->uobject       = &obj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type	  = attr.qp_type;
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	/* attr.cap may have been adjusted upward by the driver. */
	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);
	put_cq_read(scq);
	if (rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1186
/*
 * Query a QP's current attributes and marshal them back to userspace.
 *
 * The ib_qp_attr/ib_qp_init_attr pair is heap-allocated rather than
 * stack-allocated to keep the kernel stack footprint small.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;	/* kfree(NULL) below is a harmless no-op */
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	/* The QP reference is only needed for the query itself. */
	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	/* Primary path address vector. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	/* Alternate path address vector. */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
1286
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Modify a QP's attributes from a userspace command.
 *
 * Unmarshals the user-supplied attribute block into a heap-allocated
 * ib_qp_attr (kept off the stack due to its size) and passes it, along
 * with the attr_mask selecting which fields apply, straight to the
 * device driver's modify_qp method.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* No response struct: out buffer is NULL, only driver data in. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	/* Primary path address vector. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	    	    = cmd.dest.sl;
	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;

	/* Alternate path address vector. */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;

	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}
1373
/*
 * Destroy a userspace QP.
 *
 * Refuses (-EBUSY) while multicast groups are still attached, since
 * the attached mcast entries hang off the QP's uobject.  Otherwise
 * destroys the hardware QP, unlinks the uobject, drains pending async
 * events, and reports the number of events that were delivered.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp               	*qp;
	struct ib_uqp_object        	*obj;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* Write-lock the uobject so no one else can use the QP. */
	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* Userspace must detach all multicast groups first. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;	/* only mark dead on successful destroy */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Drain queued async events before reading the reported count. */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1428
 
 
 
 
 
 
1429ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 
1430			    const char __user *buf, int in_len,
1431			    int out_len)
1432{
1433	struct ib_uverbs_post_send      cmd;
1434	struct ib_uverbs_post_send_resp resp;
1435	struct ib_uverbs_send_wr       *user_wr;
1436	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
1437	struct ib_qp                   *qp;
1438	int                             i, sg_ind;
1439	int				is_ud;
1440	ssize_t                         ret = -EINVAL;
 
1441
1442	if (copy_from_user(&cmd, buf, sizeof cmd))
1443		return -EFAULT;
1444
1445	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
1446	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
1447		return -EINVAL;
1448
1449	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
1450		return -EINVAL;
1451
1452	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
1453	if (!user_wr)
1454		return -ENOMEM;
1455
1456	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1457	if (!qp)
1458		goto out;
1459
1460	is_ud = qp->qp_type == IB_QPT_UD;
1461	sg_ind = 0;
1462	last = NULL;
1463	for (i = 0; i < cmd.wr_count; ++i) {
1464		if (copy_from_user(user_wr,
1465				   buf + sizeof cmd + i * cmd.wqe_size,
1466				   cmd.wqe_size)) {
1467			ret = -EFAULT;
1468			goto out_put;
1469		}
1470
1471		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
1472			ret = -EINVAL;
1473			goto out_put;
1474		}
1475
1476		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1477			       user_wr->num_sge * sizeof (struct ib_sge),
1478			       GFP_KERNEL);
1479		if (!next) {
1480			ret = -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1481			goto out_put;
1482		}
1483
 
 
 
 
 
 
 
 
1484		if (!last)
1485			wr = next;
1486		else
1487			last->next = next;
1488		last = next;
1489
1490		next->next       = NULL;
1491		next->wr_id      = user_wr->wr_id;
1492		next->num_sge    = user_wr->num_sge;
1493		next->opcode     = user_wr->opcode;
1494		next->send_flags = user_wr->send_flags;
1495
1496		if (is_ud) {
1497			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
1498						     file->ucontext);
1499			if (!next->wr.ud.ah) {
1500				ret = -EINVAL;
1501				goto out_put;
1502			}
1503			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
1504			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
1505		} else {
1506			switch (next->opcode) {
1507			case IB_WR_RDMA_WRITE_WITH_IMM:
1508				next->ex.imm_data =
1509					(__be32 __force) user_wr->ex.imm_data;
1510			case IB_WR_RDMA_WRITE:
1511			case IB_WR_RDMA_READ:
1512				next->wr.rdma.remote_addr =
1513					user_wr->wr.rdma.remote_addr;
1514				next->wr.rdma.rkey        =
1515					user_wr->wr.rdma.rkey;
1516				break;
1517			case IB_WR_SEND_WITH_IMM:
1518				next->ex.imm_data =
1519					(__be32 __force) user_wr->ex.imm_data;
1520				break;
1521			case IB_WR_SEND_WITH_INV:
1522				next->ex.invalidate_rkey =
1523					user_wr->ex.invalidate_rkey;
1524				break;
1525			case IB_WR_ATOMIC_CMP_AND_SWP:
1526			case IB_WR_ATOMIC_FETCH_AND_ADD:
1527				next->wr.atomic.remote_addr =
1528					user_wr->wr.atomic.remote_addr;
1529				next->wr.atomic.compare_add =
1530					user_wr->wr.atomic.compare_add;
1531				next->wr.atomic.swap = user_wr->wr.atomic.swap;
1532				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
1533				break;
1534			default:
1535				break;
1536			}
1537		}
1538
1539		if (next->num_sge) {
1540			next->sg_list = (void *) next +
1541				ALIGN(sizeof *next, sizeof (struct ib_sge));
1542			if (copy_from_user(next->sg_list,
1543					   buf + sizeof cmd +
1544					   cmd.wr_count * cmd.wqe_size +
1545					   sg_ind * sizeof (struct ib_sge),
1546					   next->num_sge * sizeof (struct ib_sge))) {
1547				ret = -EFAULT;
1548				goto out_put;
1549			}
1550			sg_ind += next->num_sge;
1551		} else
1552			next->sg_list = NULL;
1553	}
1554
1555	resp.bad_wr = 0;
1556	ret = qp->device->post_send(qp, wr, &bad_wr);
1557	if (ret)
1558		for (next = wr; next; next = next->next) {
1559			++resp.bad_wr;
1560			if (next == bad_wr)
1561				break;
1562		}
1563
1564	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1565			 &resp, sizeof resp))
1566		ret = -EFAULT;
1567
1568out_put:
1569	put_qp_read(qp);
1570
1571	while (wr) {
1572		if (is_ud && wr->wr.ud.ah)
1573			put_ah_read(wr->wr.ud.ah);
1574		next = wr->next;
1575		kfree(wr);
1576		wr = next;
1577	}
1578
1579out:
1580	kfree(user_wr);
1581
1582	return ret ? ret : in_len;
1583}
1584
1585static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
1586						    int in_len,
1587						    u32 wr_count,
1588						    u32 sge_count,
1589						    u32 wqe_size)
1590{
1591	struct ib_uverbs_recv_wr *user_wr;
1592	struct ib_recv_wr        *wr = NULL, *last, *next;
1593	int                       sg_ind;
1594	int                       i;
1595	int                       ret;
1596
1597	if (in_len < wqe_size * wr_count +
1598	    sge_count * sizeof (struct ib_uverbs_sge))
1599		return ERR_PTR(-EINVAL);
1600
1601	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
1602		return ERR_PTR(-EINVAL);
1603
1604	user_wr = kmalloc(wqe_size, GFP_KERNEL);
1605	if (!user_wr)
1606		return ERR_PTR(-ENOMEM);
1607
1608	sg_ind = 0;
1609	last = NULL;
1610	for (i = 0; i < wr_count; ++i) {
1611		if (copy_from_user(user_wr, buf + i * wqe_size,
1612				   wqe_size)) {
1613			ret = -EFAULT;
1614			goto err;
1615		}
1616
1617		if (user_wr->num_sge + sg_ind > sge_count) {
1618			ret = -EINVAL;
1619			goto err;
1620		}
1621
1622		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1623			       user_wr->num_sge * sizeof (struct ib_sge),
1624			       GFP_KERNEL);
1625		if (!next) {
1626			ret = -ENOMEM;
1627			goto err;
1628		}
1629
1630		if (!last)
1631			wr = next;
1632		else
1633			last->next = next;
1634		last = next;
1635
1636		next->next       = NULL;
1637		next->wr_id      = user_wr->wr_id;
1638		next->num_sge    = user_wr->num_sge;
1639
1640		if (next->num_sge) {
1641			next->sg_list = (void *) next +
1642				ALIGN(sizeof *next, sizeof (struct ib_sge));
1643			if (copy_from_user(next->sg_list,
1644					   buf + wr_count * wqe_size +
1645					   sg_ind * sizeof (struct ib_sge),
1646					   next->num_sge * sizeof (struct ib_sge))) {
1647				ret = -EFAULT;
1648				goto err;
1649			}
1650			sg_ind += next->num_sge;
1651		} else
1652			next->sg_list = NULL;
1653	}
1654
1655	kfree(user_wr);
1656	return wr;
1657
1658err:
1659	kfree(user_wr);
1660
1661	while (wr) {
1662		next = wr->next;
1663		kfree(wr);
1664		wr = next;
1665	}
1666
1667	return ERR_PTR(ret);
1668}
1669
/*
 * Post a chain of receive work requests to a QP.
 *
 * Unmarshals the WR list with ib_uverbs_unmarshall_recv() and hands it
 * to the driver's post_recv method.  On a post failure the response
 * reports how many WRs were consumed up to and including the bad one.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;	/* ret stays -EINVAL; WR list still freed */

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* Count WRs consumed up to and including the failing one. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
1718
/*
 * Post a chain of receive work requests to a shared receive queue.
 *
 * Identical in structure to ib_uverbs_post_recv() but targets an SRQ
 * via the driver's post_srq_recv method.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;	/* ret stays -EINVAL; WR list still freed */

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* Count WRs consumed up to and including the failing one. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
1767
/*
 * Create an address handle on behalf of userspace.
 *
 * Unmarshals the user-supplied address vector, creates the AH on the
 * PD referenced by the command, and installs the new uobject in the
 * AH idr and the per-file ah_list.  The uobject is created
 * write-locked and only marked live after the handle has been copied
 * out to userspace.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
1857
/*
 * Destroy a userspace address handle.
 *
 * Write-locks the uobject, destroys the AH, then unlinks the uobject
 * from the idr and the per-file list and drops the final reference.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;	/* only mark dead on successful destroy */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1893
/*
 * Attach a QP to a multicast group.
 *
 * Attaching the same (gid, mlid) pair twice is treated as success
 * without a second hardware attach.  On a new attach, the (gid, mlid)
 * entry is recorded on the QP's uobject so destroy_qp can refuse while
 * memberships remain and detach can find the entry later.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Idempotent: already attached to this (gid, mlid) -> success. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);	/* hardware attach failed; drop the record */

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}
1940
/*
 * Detach a QP from a multicast group.
 *
 * Performs the hardware detach first; only on success is the matching
 * (gid, mlid) bookkeeping entry removed from the QP uobject's list.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Remove the matching bookkeeping entry, if one was recorded. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}
1977
/*
 * Create a shared receive queue on behalf of userspace.
 *
 * Looks up the PD referenced by the command, builds an
 * ib_srq_init_attr from the user-supplied limits, calls the device's
 * create_srq method, wires up the SRQ's back-pointers and reference
 * counts, and installs the new uobject in the SRQ idr and the
 * per-file srq_list.  The uobject is created write-locked and only
 * marked live after the response has been copied out.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	struct ib_uevent_object         *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_srq_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private command/response tail follows the fixed structs. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
	down_write(&obj->uobject.mutex);

	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.attr.max_wr    = cmd.max_wr;
	attr.attr.max_sge   = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	obj->events_reported     = 0;
	INIT_LIST_HEAD(&obj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the generic SRQ fields the core layer owns. */
	srq->device    	   = pd->device;
	srq->pd        	   = pd;
	srq->uobject       = &obj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
	if (ret)
		goto err_destroy;

	/* attr.attr may have been adjusted by the driver. */
	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
2078
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Modify a shared receive queue's attributes.
 *
 * Passes the user-supplied max_wr/srq_limit plus the attr_mask
 * selecting which fields apply to the driver's modify_srq method.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* No response struct: out buffer is NULL, only driver data in. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
2108
/*
 * Handle the UVERBS_QUERY_SRQ command: read the SRQ's current
 * attributes and copy them back to userspace.
 */
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	/* Drop the uobject read lock before touching userspace memory. */
	put_srq_read(srq);

	if (ret)
		return ret;

	/* Zero the response so no kernel stack data leaks via padding. */
	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
2148
/*
 * Handle the UVERBS_DESTROY_SRQ command: destroy the SRQ, unlink its
 * uobject from the idr and the per-file list, and report back how many
 * async events were delivered for it.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Take the uobject rwsem for writing; fails if not live. */
	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;	/* dead: concurrent lookups now fail */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any async events still queued for this SRQ. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	/* Drop the creation reference; storage freed after RCU grace. */
	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
v4.6
   1/*
   2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   4 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
   5 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/file.h>
  37#include <linux/fs.h>
  38#include <linux/slab.h>
  39#include <linux/sched.h>
  40
  41#include <asm/uaccess.h>
  42
  43#include "uverbs.h"
  44#include "core_priv.h"
  45
/*
 * One lockdep class per uobject type, so lockdep can distinguish the
 * per-type uobject rwsems and print a meaningful lock name.
 */
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
  60
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  Read operations are instead protected by
 *   rcu_read_lock(); similarly, the kref reference is grabbed before
 *   rcu_read_unlock() is called.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
  87
/*
 * Initialize a freshly allocated uobject: record the owning context and
 * user handle, set up the kref and rwsem, and start with live == 0 so
 * lookups fail until creation has fully completed.
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	/* Per-type lockdep class so nested locking of different uobject
	 * types is not reported as self-deadlock. */
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live        = 0;
}
  98
/* kref release callback: free the uobject only after an RCU grace
 * period, since __idr_get_uobj() dereferences it under rcu_read_lock(). */
static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}
 103
/* Drop a uobject reference; the last put frees it via RCU. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
 108
/* Release the read lock taken at lookup time, then drop the lookup ref. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
 114
/* Release the write lock taken at lookup time, then drop the lookup ref. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
 120
/*
 * Allocate an id for the uobject and insert it into the given idr.
 * Returns 0 on success or a negative errno.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	/* Preload outside the spinlock so the GFP_NOWAIT allocation
	 * inside the lock cannot fail for lack of idr nodes. */
	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
 137
/* Remove the uobject's id from the idr under the global idr spinlock. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
 144
/*
 * Core lookup: find a uobject by id under rcu_read_lock() and take a
 * kref on it before the RCU critical section ends.  Fails (returns
 * NULL) if the object belongs to a different ucontext.
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}
 162
/*
 * Look up a uobject and take its rwsem for reading.  Returns NULL
 * (lock released, reference dropped) if the object is no longer live.
 * "nested" uses SINGLE_DEPTH_NESTING, presumably for callers that hold
 * a second uobject of the same lock class (see idr_read_cq) -- confirm
 * against the call sites.
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		/* Lost the race with destroy: treat as not found. */
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
 183
/*
 * Look up a uobject and take its rwsem for writing (for destroy or
 * modify-in-place).  Returns NULL if the object is no longer live.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		/* Lost the race with destroy: treat as not found. */
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
 201
 202static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
 203			  int nested)
 204{
 205	struct ib_uobject *uobj;
 206
 207	uobj = idr_read_uobj(idr, id, context, nested);
 208	return uobj ? uobj->object : NULL;
 209}
 210
/* Look up a PD by handle, read-locking its uobject. */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
 215
/* Release the read lock/ref taken by idr_read_pd(). */
static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}
 220
/* Look up a CQ by handle; "nested" allows locking a second CQ. */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
 225
/* Release the read lock/ref taken by idr_read_cq(). */
static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}
 230
/* Look up an AH by handle, read-locking its uobject. */
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
 235
/* Release the read lock/ref taken by idr_read_ah(). */
static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}
 240
/* Look up a QP by handle, read-locking its uobject. */
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
 245
 246static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
 247{
 248	struct ib_uobject *uobj;
 249
 250	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
 251	return uobj ? uobj->object : NULL;
 252}
 253
/* Release the read lock/ref taken by idr_read_qp(). */
static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}
 258
/* Release the write lock/ref taken by idr_write_qp(). */
static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}
 263
/* Look up an SRQ by handle, read-locking its uobject. */
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
 268
/* Release the read lock/ref taken by idr_read_srq(). */
static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
 273
/* Look up an XRCD by handle; also returns the uobject itself, since
 * XRCD refcounting needs it (struct ib_xrcd has no uobject pointer). */
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}
 280
/* Release the read lock/ref taken by idr_read_xrcd(). */
static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
 285
/*
 * Handle the UVERBS_GET_CONTEXT command: allocate the per-process
 * device context, create the async event file, and return its fd plus
 * the number of completion vectors.  Only one context may be created
 * per uverbs file; file->mutex serializes against a racing second call.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	/* A context already exists on this file. */
	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	/* Pin the owning thread group's pid for the context lifetime. */
	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	/* Publish the fd only after nothing can fail any more. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
 389
/*
 * Fill the legacy (non-extended) query-device response from the cached
 * device attributes.  Note device_cap_flags is truncated to 32 bits to
 * fit the old ABI field.
 */
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= attr->max_sge;
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}
 436
 437ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 438			       struct ib_device *ib_dev,
 439			       const char __user *buf,
 440			       int in_len, int out_len)
 441{
 442	struct ib_uverbs_query_device      cmd;
 443	struct ib_uverbs_query_device_resp resp;
 
 
 444
 445	if (out_len < sizeof resp)
 446		return -ENOSPC;
 447
 448	if (copy_from_user(&cmd, buf, sizeof cmd))
 449		return -EFAULT;
 450
 
 
 
 
 451	memset(&resp, 0, sizeof resp);
 452	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 453
 454	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 455			 &resp, sizeof resp))
 456		return -EFAULT;
 457
 458	return in_len;
 459}
 460
/*
 * Handle the UVERBS_QUERY_PORT command: query the given port's
 * attributes from the driver and copy them back to userspace.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	/* Zero so reserved/padding bytes never leak to userspace. */
	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
 511
/*
 * Handle the UVERBS_ALLOC_PD command: allocate a protection domain,
 * wrap it in a uobject and return its handle to userspace.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* Hold the uobject write-locked (live == 0) until fully set up. */
	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Now visible to lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
 586
/*
 * Handle the UVERBS_DEALLOC_PD command: destroy a PD if no other
 * objects (MRs, QPs, ...) still reference it, and remove its uobject.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	struct ib_pd		   *pd;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	/* Still referenced by other verbs objects: refuse. */
	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	/* Dead: further lookups fail; then release the write lock. */
	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}
 632
/*
 * Node in the per-device inode -> XRCD rb-tree, used so that processes
 * opening the same backing file share one XRCD.
 */
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
 638
/*
 * Record the inode -> XRCD mapping in the device's rb-tree so later
 * opens of the same file find the existing XRCD.  Takes an inode
 * reference on success.  Returns 0, -ENOMEM, or -EEXIST.
 */
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	/* Standard rb-tree insertion, keyed by inode pointer value. */
	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	/* NOTE(review): igrab() can return NULL for an inode being
	 * freed; the result is not checked here.  Presumably safe
	 * because the caller holds the file open (fdget) -- confirm. */
	igrab(inode);
	return 0;
}
 673
 674static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
 675						  struct inode *inode)
 676{
 677	struct xrcd_table_entry *entry;
 678	struct rb_node *p = dev->xrcd_tree.rb_node;
 679
 680	while (p) {
 681		entry = rb_entry(p, struct xrcd_table_entry, node);
 682
 683		if (inode < entry->inode)
 684			p = p->rb_left;
 685		else if (inode > entry->inode)
 686			p = p->rb_right;
 687		else
 688			return entry;
 689	}
 690
 691	return NULL;
 692}
 693
 694static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
 695{
 696	struct xrcd_table_entry *entry;
 697
 698	entry = xrcd_table_search(dev, inode);
 699	if (!entry)
 700		return NULL;
 701
 702	return entry->xrcd;
 703}
 704
 705static void xrcd_table_delete(struct ib_uverbs_device *dev,
 706			      struct inode *inode)
 707{
 708	struct xrcd_table_entry *entry;
 709
 710	entry = xrcd_table_search(dev, inode);
 711	if (entry) {
 712		iput(inode);
 713		rb_erase(&entry->node, &dev->xrcd_tree);
 714		kfree(entry);
 715	}
 716}
 717
/*
 * Handle the UVERBS_OPEN_XRCD command: open (or create) an XRC domain.
 * When cmd.fd names a file, the inode -> XRCD table is consulted so all
 * processes opening that file share one XRCD; cmd.fd == -1 creates an
 * anonymous, non-shared XRCD.  xrcd_tree_mutex serializes table access.
 */
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof  resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		/* O_EXCL demands that the XRCD not already exist. */
		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		/* No shared XRCD found: allocate a new one. */
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		/* One usecnt per uobject sharing this inode's XRCD. */
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
 854
/*
 * Handle the UVERBS_CLOSE_XRCD command: drop this process's handle on
 * the XRC domain.  The XRCD itself is only deallocated when the last
 * inode-sharing user closes it (usecnt drops to zero) or when it is an
 * anonymous (inode-less) XRCD.
 */
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject           *uobj;
	struct ib_xrcd              *xrcd = NULL;
	struct inode                *inode = NULL;
	struct ib_uxrcd_object      *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	/* QPs/SRQs opened on this XRCD still exist: refuse. */
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	/* Deallocation failed: restore the usecnt we dropped above. */
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	/* Fully torn down: remove the inode mapping too. */
	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
 917
 918void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
 919			    struct ib_xrcd *xrcd)
 920{
 921	struct inode *inode;
 922
 923	inode = xrcd->inode;
 924	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
 925		return;
 926
 927	ib_dealloc_xrcd(xrcd);
 928
 929	if (inode)
 930		xrcd_table_delete(dev, inode);
 931}
 932
/*
 * Handle the UVERBS_REG_MR command: register a userspace memory region
 * on a PD and return its handle plus the local/remote keys.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* The user VA and the HCA VA must share the same page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* Hold the uobject write-locked (live == 0) until fully set up. */
	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	/* The MR pins its PD; ib_uverbs_dealloc_pd checks this count. */
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	/* Now visible to lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1038
/*
 * IB_USER_VERBS_CMD_REREG_MR handler: modify an existing user MR's
 * translation (start/length/hca_va), access flags and/or protection
 * domain, according to the bits set in cmd.flags.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_pd                *pd = NULL;
	struct ib_mr                *mr;
	struct ib_pd		    *old_pd;
	int                          ret;
	struct ib_uobject	    *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	/* At least one rereg operation must be requested, and only
	 * supported ones may be set. */
	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	/* A translation change requires a non-empty range whose start and
	 * HCA virtual address share the same offset within a page. */
	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
			return -EINVAL;

	/* Hold the MR uobject write-locked across the whole modification
	 * so no other command can touch it concurrently. */
	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		/* Move the usecnt reference from the old PD to the new
		 * one only after the driver accepted the change. */
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}
1127
/*
 * IB_USER_VERBS_CMD_DEREG_MR handler: destroy a user MR and tear down
 * its uobject (idr entry, context list membership, reference).
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Write-lock the uobject so the MR cannot be used while it is
	 * being destroyed. */
	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		/* Mark the uobject dead before dropping the write lock. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1166
/*
 * IB_USER_VERBS_CMD_ALLOC_MW handler: allocate a memory window of the
 * requested type on the given PD and return its rkey and handle.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	struct ib_udata		       udata;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* The new uobject stays write-locked until it goes live at the
	 * end of the function. */
	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	/* Driver-private input excludes the command header as well as
	 * the fixed command struct. */
	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	/* Only now may other commands look the MW up through the idr. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1255
/*
 * IB_USER_VERBS_CMD_DEALLOC_MW handler: destroy a memory window and
 * tear down its uobject. Mirrors ib_uverbs_dereg_mr.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject	   *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	/* Write-lock the uobject for the duration of the destroy. */
	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		/* Mark dead before dropping the write lock. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1294
/*
 * IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL handler: create a completion
 * event file and hand its file descriptor back to userspace.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Reserve the fd number first so failures below can still be
	 * unwound with put_unused_fd(). */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	/* fd_install() cannot be undone, so it is the very last step. */
	fd_install(resp.fd, filp);
	return in_len;
}
1332
/*
 * Common CQ creation helper shared by the legacy (ib_uverbs_create_cq)
 * and extended (ib_uverbs_ex_create_cq) command paths.
 *
 * @cmd_sz: how much of the extended command struct the caller actually
 *          received; used to decide whether optional fields (flags)
 *          are valid.
 * @cb:     per-ABI callback that copies the response to userspace.
 *
 * Returns the new ib_ucq_object or an ERR_PTR on failure.
 */
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
					struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* New uobject stays write-locked until it goes live below. */
	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	/* comp_channel < 0 means no completion channel is attached. */
	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	/* The flags field only exists if the caller's command struct is
	 * large enough to contain it (extended ABI). */
	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
					     file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	/* Let the ABI-specific callback copy the response out. */
	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}
1438
1439static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1440				  struct ib_ucq_object *obj,
1441				  struct ib_uverbs_ex_create_cq_resp *resp,
1442				  struct ib_udata *ucore, void *context)
1443{
1444	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1445		return -EFAULT;
1446
1447	return 0;
1448}
1449
/*
 * IB_USER_VERBS_CMD_CREATE_CQ handler (legacy ABI): translate the old
 * command layout into the extended one and defer to create_cq().
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	/* ucore covers the fixed command/response; uhw is the trailing
	 * driver-private portion. */
	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	/* cmd_sz stops at comp_channel, so create_cq() ignores the
	 * extended-only fields. */
	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
1490
1491static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1492				     struct ib_ucq_object *obj,
1493				     struct ib_uverbs_ex_create_cq_resp *resp,
1494				     struct ib_udata *ucore, void *context)
1495{
1496	if (ib_copy_to_udata(ucore, resp, resp->response_length))
1497		return -EFAULT;
1498
1499	return 0;
1500}
1501
/*
 * IB_USER_VERBS_EX_CMD_CREATE_CQ handler (extended ABI): validate the
 * extended command and defer to create_cq().
 *
 * Returns 0 on success, a negative errno on failure.
 */
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* No comp_mask bits or reserved values are defined yet. */
	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* Response must at least fit up to and including response_length. */
	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}
1538
/*
 * IB_USER_VERBS_CMD_RESIZE_CQ handler: ask the driver to resize a CQ
 * and report the resulting cqe count.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* Note: only the cqe field of the response is copied out here
	 * (sizeof resp.cqe, not sizeof resp). */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
1576
/*
 * Translate a kernel ib_wc into the userspace ib_uverbs_wc layout and
 * copy it to @dest. Returns 0 on success or -EFAULT.
 */
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
1602
/*
 * IB_USER_VERBS_CMD_POLL_CQ handler: poll up to cmd.ne completions one
 * at a time, streaming each ib_uverbs_wc to userspace right after the
 * response header location.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		/* Poll completions one by one so each can be copied out
		 * immediately after the previous one. */
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	/* The header (completion count) is written last, once the count
	 * is known. */
	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
1654
1655ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1656				struct ib_device *ib_dev,
1657				const char __user *buf, int in_len,
1658				int out_len)
1659{
1660	struct ib_uverbs_req_notify_cq cmd;
1661	struct ib_cq                  *cq;
1662
1663	if (copy_from_user(&cmd, buf, sizeof cmd))
1664		return -EFAULT;
1665
1666	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1667	if (!cq)
1668		return -EINVAL;
1669
1670	ib_req_notify_cq(cq, cmd.solicited_only ?
1671			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1672
1673	put_cq_read(cq);
1674
1675	return in_len;
1676}
1677
/*
 * IB_USER_VERBS_CMD_DESTROY_CQ handler: destroy a CQ, drain its pending
 * events and report how many comp/async events had been delivered.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		/* Mark dead before dropping the write lock. */
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	/* Read the event counters only after release_ucq, so the values
	 * reported to userspace are final. */
	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1730
/*
 * Common QP creation helper shared by the legacy (ib_uverbs_create_qp)
 * and extended (ib_uverbs_ex_create_qp) command paths.
 *
 * @cmd_sz: how much of the extended command struct the caller actually
 *          received; optional fields past the legacy layout are only
 *          honoured when they fit within cmd_sz.
 * @cb:     per-ABI callback that copies the response to userspace.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*uninitialized_var(xrcd_uobj);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	char				*buf;
	struct ib_qp_init_attr		attr;
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;

	/* Raw packet QPs allow bypassing the normal stack. */
	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject stays write-locked until it goes live below. */
	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		/* XRC target QPs hang off an XRCD instead of a PD; the
		 * pd_handle field carries the XRCD handle here. */
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			/* XRC initiator QPs have no receive queue. */
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd->recv_cq_handle != cmd->send_cq_handle) {
				rcq = idr_read_cq(cmd->recv_cq_handle,
						  file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		/* Same handle for both queues means rcq shares scq. */
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	/* create_flags is an extended-ABI field; honour it only if the
	 * caller's command struct actually contains it. */
	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				IB_QP_CREATE_CROSS_CHANNEL |
				IB_QP_CREATE_MANAGED_SEND |
				IB_QP_CREATE_MANAGED_RECV)) {
		ret = -EINVAL;
		goto err_put;
	}

	/* Any trailing bytes beyond the known command struct must be
	 * zero, so future fields can be added safely. */
	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		/* Fill in the fields ib_create_qp() would normally set
		 * and take references on everything the QP points at. */
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	/* Let the ABI-specific callback copy the response out. */
	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	/* Drop the idr read locks taken during lookup. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1946
1947static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
1948				  struct ib_uverbs_ex_create_qp_resp *resp,
1949				  struct ib_udata *ucore)
1950{
1951	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1952		return -EFAULT;
1953
1954	return 0;
1955}
1956
/*
 * IB_USER_VERBS_CMD_CREATE_QP handler (legacy ABI): translate the old
 * command layout into the extended one and defer to create_qp().
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	struct ib_udata			ucore;
	struct ib_udata			uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int				err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	/* ucore covers the fixed command/response; uhw is the trailing
	 * driver-private portion (command header excluded). */
	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	/* cmd_sz stops at is_srq, so create_qp() ignores the
	 * extended-only fields. */
	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}
2007
2008static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
2009				     struct ib_uverbs_ex_create_qp_resp *resp,
2010				     struct ib_udata *ucore)
2011{
2012	if (ib_copy_to_udata(ucore, resp, resp->response_length))
2013		return -EFAULT;
2014
2015	return 0;
2016}
2017
/*
 * IB_USER_VERBS_EX_CMD_CREATE_QP handler (extended ABI): validate the
 * extended command and defer to create_qp().
 *
 * Returns 0 on success, a negative errno on failure.
 */
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	/* The caller must supply at least everything up to and
	 * including comp_mask. */
	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	/* No comp_mask bits or reserved values are defined yet. */
	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* Response must at least fit up to and including response_length. */
	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}
2054
/*
 * IB_USER_VERBS_CMD_OPEN_QP handler: open a reference to an existing
 * (shareable XRC) QP that lives in an XRCD, creating a new local
 * uobject/handle for it.
 *
 * Returns in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* New uobject stays write-locked until it goes live below. */
	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	/* For open_qp the pd_handle field carries the XRCD handle. */
	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	/* Keep the enclosing XRCD pinned for the lifetime of this QP. */
	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
2149
/*
 * ib_uverbs_query_qp - handle the QUERY_QP uverbs command.
 *
 * Copies the command from user space, queries the QP identified by
 * cmd.qp_handle, and marshals the returned ib_qp_attr / ib_qp_init_attr
 * into the fixed-layout response structure written back to user space.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* attr/init_attr are large; allocate rather than burn stack. */
	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		/* kfree(NULL) is a no-op, so the shared exit path is safe
		 * even if only one allocation succeeded. */
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	/* Drop the read reference as soon as the query is done; the rest
	 * of the function only touches the local attr copies. */
	put_qp_read(qp);

	if (ret)
		goto out;

	/* Zero the response first so padding/unset fields don't leak
	 * kernel stack contents to user space. */
	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	/* Primary path address vector. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	/* Alternate path address vector (for path migration). */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	/* Creation-time capabilities reported back from init_attr. */
	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
2250
2251/* Remove ignored fields set in the attribute mask */
2252static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2253{
2254	switch (qp_type) {
2255	case IB_QPT_XRC_INI:
2256		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
2257	case IB_QPT_XRC_TGT:
2258		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2259				IB_QP_RNR_RETRY);
2260	default:
2261		return mask;
2262	}
2263}
2264
/*
 * ib_uverbs_modify_qp - handle the MODIFY_QP uverbs command.
 *
 * Unmarshals a user-supplied ib_uverbs_modify_qp into a kernel
 * ib_qp_attr (including both primary and alternate address vectors)
 * and applies it to the QP identified by cmd.qp_handle.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Anything past the fixed command is driver-private data. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	/* Primary path address vector. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	    	    = cmd.dest.sl;
	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;

	/* Alternate path address vector (for path migration). */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		/* Real QP: resolve the Ethernet dmac (RoCE) ourselves and
		 * call the driver directly so udata is passed through. */
		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		/* Shared (XRC target) QP: go through the core helper,
		 * which operates on the underlying real QP. */
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
2361
/*
 * ib_uverbs_destroy_qp - handle the DESTROY_QP uverbs command.
 *
 * Tears down the QP identified by cmd.qp_handle: destroys the hardware
 * object, removes the uobject from the idr and the per-context list,
 * flushes pending events, and reports the number of async events that
 * were delivered for this QP back to user space.
 *
 * Fails with -EBUSY while the QP is still attached to multicast groups.
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp               	*qp;
	struct ib_uqp_object        	*obj;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* Take the uobject write-locked so no other command can race
	 * with the teardown. */
	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* User space must detach all multicast groups first. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;	/* mark dead while still holding the lock */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* An XRC-target QP pinned its XRCD; release that reference. */
	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Drop any queued async events; afterwards events_reported is
	 * final and can be handed back to user space. */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
2420
2421static void *alloc_wr(size_t wr_size, __u32 num_sge)
2422{
2423	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2424			 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2425};
2426
/*
 * ib_uverbs_post_send - handle the POST_SEND uverbs command.
 *
 * The input buffer holds the fixed command, then wr_count work requests
 * of wqe_size bytes each, then sge_count scatter/gather entries.  Each
 * user WR is converted into the matching kernel WR type (UD / RDMA /
 * atomic / plain send), chained, and posted in one call.  On a posting
 * error, resp.bad_wr reports the 1-based index of the failing WR.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;
	size_t                          next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The payload must actually fit in what user space sent.
	 * NOTE(review): this arithmetic can wrap for very large
	 * wr_count/wqe_size values — verify upstream hardening. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer reused for each user WR copied in. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* Running total of SGEs may not exceed what was declared. */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			/* UD QPs only support SEND (with/without imm). */
			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			/* Holds a read reference on the AH; dropped in the
			 * cleanup loop at out_put. */
			ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			/* Unknown/unsupported opcode from user space. */
			ret = -EINVAL;
			goto out_put;
		}

		/* The ex union carries either immediate data or an rkey to
		 * invalidate, depending on the opcode. */
		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		/* Append to the WR chain. */
		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			/* The sg list lives in the same allocation, just past
			 * the (aligned) WR struct — see alloc_wr(). */
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	/* Always post on the real QP (matters for shared XRC QPs). */
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the WR chain; for UD also drop each AH read reference. */
	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			put_ah_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
2614
/*
 * ib_uverbs_unmarshall_recv - convert a user-space array of receive work
 * requests (wr_count entries of wqe_size bytes, followed by sge_count
 * scatter/gather entries) into a kernel-side linked list of ib_recv_wr.
 *
 * Each WR and its sg list share one allocation, with the sg list placed
 * at ALIGN(sizeof(struct ib_recv_wr), sizeof(struct ib_sge)) past the
 * WR.  The caller owns the returned chain and must kfree() each node.
 *
 * Returns the head of the chain, or ERR_PTR(-errno) on failure (the
 * partially-built chain is freed internally).
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	/* The declared payload must fit in what was actually sent. */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer reused for each user WR copied in. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		/* Running total of SGEs may not exceed what was declared. */
		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* One allocation for the WR plus its sg list. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		/* Append to the chain. */
		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* Unwind the partially-built chain. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
2699
/*
 * ib_uverbs_post_recv - handle the POST_RECV uverbs command.
 *
 * Unmarshals the user's receive WR list and posts it on the QP
 * identified by cmd.qp_handle.  On a posting error, resp.bad_wr holds
 * the 1-based index of the WR that failed.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	/* Always post on the real QP (matters for shared XRC QPs). */
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* Free the unmarshalled WR chain in every case. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
2749
/*
 * ib_uverbs_post_srq_recv - handle the POST_SRQ_RECV uverbs command.
 *
 * Same shape as ib_uverbs_post_recv(), but the WR list is posted on the
 * shared receive queue identified by cmd.srq_handle.  On a posting
 * error, resp.bad_wr holds the 1-based index of the WR that failed.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* Free the unmarshalled WR chain in every case. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
2799
/*
 * ib_uverbs_create_ah - handle the CREATE_AH uverbs command.
 *
 * Builds an ib_ah_attr from the user-supplied address parameters,
 * creates the address handle on the PD identified by cmd.pd_handle,
 * registers it in the AH idr, and returns its handle to user space.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* Hold the uobject write-locked until it goes live at the end. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	/* NOTE(review): cmd.attr.port_num is passed through unvalidated;
	 * later kernels range-check the port number here — confirm. */
	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	/* dmac is resolved by the core for RoCE; start from zero. */
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Publish: other commands may now look the AH up. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

	/* Error unwind, in reverse order of acquisition. */
err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
2891
/*
 * ib_uverbs_destroy_ah - handle the DESTROY_AH uverbs command.
 *
 * Destroys the address handle identified by cmd.ah_handle and removes
 * its uobject from the idr and the per-context AH list.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Write-lock the uobject so no other command can race with the
	 * teardown. */
	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;	/* mark dead while still holding the lock */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
2928
/*
 * ib_uverbs_attach_mcast - handle the ATTACH_MCAST uverbs command.
 *
 * Attaches the QP identified by cmd.qp_handle to the multicast group
 * (gid, mlid) and records the attachment on the QP's uobject so that
 * destroy_qp can refuse to tear down a still-attached QP.  Attaching
 * to a group the QP is already a member of succeeds without side
 * effects (idempotent).
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Write lock: mcast_list is protected by the QP uobject lock. */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Already attached to this (mlid, gid)?  Then succeed quietly. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	/* Only track the entry if the hardware attach succeeded. */
	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
2976
/*
 * ib_uverbs_detach_mcast - handle the DETACH_MCAST uverbs command.
 *
 * Detaches the QP identified by cmd.qp_handle from the multicast group
 * (gid, mlid) and drops the matching bookkeeping entry from the QP
 * uobject's mcast_list.  The list entry is only removed if the hardware
 * detach succeeded.
 *
 * Returns in_len on success or a negative errno on failure.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Write lock: mcast_list is protected by the QP uobject lock. */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Drop the matching bookkeeping entry, if we have one. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
3014
3015static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
3016				union ib_flow_spec *ib_spec)
3017{
3018	if (kern_spec->reserved)
3019		return -EINVAL;
3020
3021	ib_spec->type = kern_spec->type;
3022
3023	switch (ib_spec->type) {
3024	case IB_FLOW_SPEC_ETH:
3025		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
3026		if (ib_spec->eth.size != kern_spec->eth.size)
3027			return -EINVAL;
3028		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
3029		       sizeof(struct ib_flow_eth_filter));
3030		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
3031		       sizeof(struct ib_flow_eth_filter));
3032		break;
3033	case IB_FLOW_SPEC_IPV4:
3034		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
3035		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
3036			return -EINVAL;
3037		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
3038		       sizeof(struct ib_flow_ipv4_filter));
3039		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
3040		       sizeof(struct ib_flow_ipv4_filter));
3041		break;
3042	case IB_FLOW_SPEC_TCP:
3043	case IB_FLOW_SPEC_UDP:
3044		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
3045		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
3046			return -EINVAL;
3047		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
3048		       sizeof(struct ib_flow_tcp_udp_filter));
3049		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
3050		       sizeof(struct ib_flow_tcp_udp_filter));
3051		break;
3052	default:
3053		return -EINVAL;
3054	}
3055	return 0;
3056}
3057
/*
 * ib_uverbs_ex_create_flow - handle the extended CREATE_FLOW command.
 *
 * Validates and copies a variable-length flow attribute (header plus
 * num_of_specs per-protocol specs) from user space, translates each
 * spec with kern_spec_to_ib_spec(), creates the flow steering rule on
 * the QP identified by cmd.qp_handle, and returns the new flow handle.
 *
 * Requires CAP_NET_RAW (and CAP_NET_ADMIN for sniffer rules).
 * Returns 0 on success or a negative errno on failure.
 */
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* Advance past the fixed header; the rest is the spec array. */
	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	/* Flow steering can redirect traffic; restrict to privileged
	 * callers (sniffer rules additionally need CAP_NET_ADMIN). */
	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	/* DONT_TRAP makes no sense for the default catch-all rules. */
	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	/* size must both fit in the input and be consistent with the
	 * declared spec count — guards the copy and the parse loop. */
	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		/* Header plus trailing spec array in one buffer. */
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		/* No specs: the header already in cmd suffices. */
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	/* Hold the uobject write-locked until it goes live at the end. */
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/* Walk the user spec array, converting each entry; both cursors
	 * advance by each spec's own (already validated) size field. */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	/* Leftover bytes or a short spec count mean user space lied
	 * about the layout — reject rather than guess. */
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	/* Publish: other commands may now look the rule up. */
	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
	/* Error unwind, in reverse order of acquisition. */
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
3226
/*
 * ib_uverbs_ex_destroy_flow - handle the extended DESTROY_FLOW command.
 *
 * Destroys the flow steering rule identified by cmd.flow_handle and
 * removes its uobject from the idr and the per-context rule list.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	/* Write-lock the uobject so no other command can race with the
	 * teardown. */
	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;	/* mark dead while still holding the lock */

	put_uobj_write(uobj);

	/* NOTE(review): unlike destroy_qp/destroy_ah, the uobject is
	 * unlinked even when ib_destroy_flow() failed — confirm this is
	 * intentional. */
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
3269
/*
 * __uverbs_create_xsrq - common path for the create_srq and create_xsrq
 * verbs.
 *
 * Allocates and initializes the SRQ uobject, resolves the handles named
 * in @cmd (the PD, plus the XRCD and completion queue when
 * @cmd->srq_type is IB_SRQT_XRC), asks the device driver to create the
 * SRQ, and then publishes it: idr entry, response copied out to
 * cmd->response, insertion into the per-context srq_list, and finally
 * marking the uobject live.
 *
 * On any failure the acquired references are released in exact reverse
 * order of acquisition through the chained goto labels at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	/* Only the members consumed below are initialized; the
	 * attr.ext.xrc fields are written (and read) solely for
	 * IB_SRQT_XRC. */
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* The new uobject is held write-locked until it is published at
	 * the end of this function (or destroyed on the error path). */
	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		/* XRC SRQs additionally reference an XRCD and a CQ. */
		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the core-tracked fields the driver does not set. */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Success: drop the lookup references taken above... */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	/* ...then publish the object and release its write lock. */
	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

	/* Error unwinding: strictly the reverse of the acquisitions. */
err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
3403
3404ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3405			     struct ib_device *ib_dev,
3406			     const char __user *buf, int in_len,
3407			     int out_len)
3408{
3409	struct ib_uverbs_create_srq      cmd;
3410	struct ib_uverbs_create_xsrq     xcmd;
3411	struct ib_uverbs_create_srq_resp resp;
3412	struct ib_udata                  udata;
3413	int ret;
3414
3415	if (out_len < sizeof resp)
3416		return -ENOSPC;
3417
3418	if (copy_from_user(&cmd, buf, sizeof cmd))
3419		return -EFAULT;
3420
3421	xcmd.response	 = cmd.response;
3422	xcmd.user_handle = cmd.user_handle;
3423	xcmd.srq_type	 = IB_SRQT_BASIC;
3424	xcmd.pd_handle	 = cmd.pd_handle;
3425	xcmd.max_wr	 = cmd.max_wr;
3426	xcmd.max_sge	 = cmd.max_sge;
3427	xcmd.srq_limit	 = cmd.srq_limit;
3428
3429	INIT_UDATA(&udata, buf + sizeof cmd,
3430		   (unsigned long) cmd.response + sizeof resp,
3431		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3432		   out_len - sizeof resp);
3433
3434	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3435	if (ret)
3436		return ret;
3437
3438	return in_len;
3439}
3440
3441ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3442			      struct ib_device *ib_dev,
3443			      const char __user *buf, int in_len, int out_len)
3444{
3445	struct ib_uverbs_create_xsrq     cmd;
3446	struct ib_uverbs_create_srq_resp resp;
3447	struct ib_udata                  udata;
3448	int ret;
3449
3450	if (out_len < sizeof resp)
3451		return -ENOSPC;
3452
3453	if (copy_from_user(&cmd, buf, sizeof cmd))
3454		return -EFAULT;
3455
3456	INIT_UDATA(&udata, buf + sizeof cmd,
3457		   (unsigned long) cmd.response + sizeof resp,
3458		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3459		   out_len - sizeof resp);
3460
3461	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3462	if (ret)
3463		return ret;
3464
3465	return in_len;
3466}
3467
3468ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3469			     struct ib_device *ib_dev,
3470			     const char __user *buf, int in_len,
3471			     int out_len)
3472{
3473	struct ib_uverbs_modify_srq cmd;
3474	struct ib_udata             udata;
3475	struct ib_srq              *srq;
3476	struct ib_srq_attr          attr;
3477	int                         ret;
3478
3479	if (copy_from_user(&cmd, buf, sizeof cmd))
3480		return -EFAULT;
3481
3482	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3483		   out_len);
3484
3485	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3486	if (!srq)
3487		return -EINVAL;
3488
3489	attr.max_wr    = cmd.max_wr;
3490	attr.srq_limit = cmd.srq_limit;
3491
3492	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3493
3494	put_srq_read(srq);
3495
3496	return ret ? ret : in_len;
3497}
3498
3499ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3500			    struct ib_device *ib_dev,
3501			    const char __user *buf,
3502			    int in_len, int out_len)
3503{
3504	struct ib_uverbs_query_srq      cmd;
3505	struct ib_uverbs_query_srq_resp resp;
3506	struct ib_srq_attr              attr;
3507	struct ib_srq                   *srq;
3508	int                             ret;
3509
3510	if (out_len < sizeof resp)
3511		return -ENOSPC;
3512
3513	if (copy_from_user(&cmd, buf, sizeof cmd))
3514		return -EFAULT;
3515
3516	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3517	if (!srq)
3518		return -EINVAL;
3519
3520	ret = ib_query_srq(srq, &attr);
3521
3522	put_srq_read(srq);
3523
3524	if (ret)
3525		return ret;
3526
3527	memset(&resp, 0, sizeof resp);
3528
3529	resp.max_wr    = attr.max_wr;
3530	resp.max_sge   = attr.max_sge;
3531	resp.srq_limit = attr.srq_limit;
3532
3533	if (copy_to_user((void __user *) (unsigned long) cmd.response,
3534			 &resp, sizeof resp))
3535		return -EFAULT;
3536
3537	return in_len;
3538}
3539
/*
 * ib_uverbs_destroy_srq - destroy an SRQ and report its pending events.
 *
 * Looks up the SRQ uobject with the write lock held, destroys the SRQ,
 * and on success removes the uobject from the idr and the per-context
 * list, releases any queued async events, and reports the number of
 * events already delivered to userspace in the response.  On driver
 * failure the handle is left intact and the error is returned.
 *
 * Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/* Cache the type now: on success ib_destroy_srq() frees *srq. */
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/* Keep the handle valid if the driver failed to destroy. */
	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		/* Drop the XRCD reference taken at creation time. */
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush queued async events; updates obj->events_reported. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
3597
/*
 * ib_uverbs_ex_query_device - extended QUERY_DEVICE verb.
 *
 * The response is extensible: the legacy base fields are always
 * returned, and each newer field (odp_caps, timestamp_mask,
 * hca_core_clock) is appended only when the userspace output buffer
 * (ucore->outlen) is large enough to hold it.  resp.response_length
 * reports how many bytes were actually filled in, so older userspace
 * with a smaller buffer keeps working unchanged.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* No extension bits defined; reserved must be zero. */
	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* The base (pre-odp_caps) portion must always fit. */
	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	/* Each optional field below extends response_length only if the
	 * caller's buffer can hold it; otherwise stop at "end". */
	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	/* Without ODP support the field is returned as all-zero but
	 * still counted in response_length. */
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}