v6.13.7
   1/*
   2 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/file.h>
  34#include <linux/anon_inodes.h>
  35#include <linux/sched/mm.h>
  36#include <rdma/ib_verbs.h>
  37#include <rdma/uverbs_types.h>
  38#include <linux/rcupdate.h>
  39#include <rdma/uverbs_ioctl.h>
  40#include <rdma/rdma_user_ioctl.h>
  41#include "uverbs.h"
  42#include "core_priv.h"
  43#include "rdma_core.h"
  44
  45static void uverbs_uobject_free(struct kref *ref)
  46{
  47	kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
  48}
  49
  50/*
   51 * In order to indicate we no longer need this uobject, uverbs_uobject_put
  52 * is called. When the reference count is decreased, the uobject is freed.
  53 * For example, this is used when attaching a completion channel to a CQ.
  54 */
  55void uverbs_uobject_put(struct ib_uobject *uobject)
  56{
  57	kref_put(&uobject->ref, uverbs_uobject_free);
  58}
  59EXPORT_SYMBOL(uverbs_uobject_put);
  60
  61int uverbs_try_lock_object(struct ib_uobject *uobj,
  62			   enum rdma_lookup_mode mode)
  63{
  64	/*
  65	 * When a shared access is required, we use a positive counter. Each
  66	 * shared access request checks that the value != -1 and increment it.
  67	 * Exclusive access is required for operations like write or destroy.
  68	 * In exclusive access mode, we check that the counter is zero (nobody
  69	 * claimed this object) and we set it to -1. Releasing a shared access
  70	 * lock is done simply by decreasing the counter. As for exclusive
  71	 * access locks, since only a single one of them is allowed
  72	 * concurrently, setting the counter to zero is enough for releasing
  73	 * this lock.
  74	 */
  75	switch (mode) {
  76	case UVERBS_LOOKUP_READ:
  77		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
  78			-EBUSY : 0;
  79	case UVERBS_LOOKUP_WRITE:
  80		/* lock is exclusive */
  81		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
  82	case UVERBS_LOOKUP_DESTROY:
  83		return 0;
  84	}
  85	return 0;
  86}
  87EXPORT_SYMBOL(uverbs_try_lock_object);
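/*
 * Illustrative sketch, not upstream code: the usecnt states described
 * above, paired from a caller's point of view. The function name is
 * hypothetical.
 *
 *	usecnt ==  0	object idle
 *	usecnt ==  n>0	n concurrent shared (READ) holders
 *	usecnt == -1	one exclusive (WRITE) holder
 */
static int __maybe_unused example_shared_access(struct ib_uobject *uobj)
{
	int ret;

	/* n -> n+1, or -EBUSY if an exclusive holder already set -1 */
	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ);
	if (ret)
		return ret;

	/* ... shared use of uobj->object ... */

	/* release the shared lock; see rdma_lookup_put_uobject() below */
	atomic_dec(&uobj->usecnt);
	return 0;
}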
  88
  89static void assert_uverbs_usecnt(struct ib_uobject *uobj,
  90				 enum rdma_lookup_mode mode)
  91{
  92#ifdef CONFIG_LOCKDEP
  93	switch (mode) {
  94	case UVERBS_LOOKUP_READ:
  95		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
  96		break;
  97	case UVERBS_LOOKUP_WRITE:
  98		WARN_ON(atomic_read(&uobj->usecnt) != -1);
  99		break;
 100	case UVERBS_LOOKUP_DESTROY:
 101		break;
 102	}
 103#endif
 104}
 105
 106/*
 107 * This must be called with the hw_destroy_rwsem locked for read or write,
 108 * also the uobject itself must be locked for write.
 109 *
 110 * Upon return the HW object is guaranteed to be destroyed.
 111 *
 112 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
  113 * however the type's alloc_commit function cannot have been called and the
  114 * uobject cannot be on the uobjects list.
 115 *
 116 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 117 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 118 * needs to call rdma_lookup_put_uobject.
 119 *
 120 * For all other destroy modes this function internally unlocks the uobject
 121 * and consumes the kref on the uobj.
 122 */
 123static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 124				  enum rdma_remove_reason reason,
 125				  struct uverbs_attr_bundle *attrs)
 126{
 127	struct ib_uverbs_file *ufile = attrs->ufile;
 128	unsigned long flags;
 129	int ret;
 130
 131	lockdep_assert_held(&ufile->hw_destroy_rwsem);
 132	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
 133
 134	if (reason == RDMA_REMOVE_ABORT) {
 135		WARN_ON(!list_empty(&uobj->list));
 136		WARN_ON(!uobj->context);
 137		uobj->uapi_object->type_class->alloc_abort(uobj);
 138	} else if (uobj->object) {
 139		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
 140								attrs);
 141		if (ret)
  142			/* Nothing to be done, wait till the ucontext cleanup reaps it */
 143			return ret;
 144
 145		uobj->object = NULL;
 146	}
 147
 148	uobj->context = NULL;
 149
 150	/*
 151	 * For DESTROY the usecnt is not changed, the caller is expected to
 152	 * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
 153	 * handle.
 154	 */
 155	if (reason != RDMA_REMOVE_DESTROY)
 156		atomic_set(&uobj->usecnt, 0);
 157	else
 158		uobj->uapi_object->type_class->remove_handle(uobj);
 159
 160	if (!list_empty(&uobj->list)) {
 161		spin_lock_irqsave(&ufile->uobjects_lock, flags);
 162		list_del_init(&uobj->list);
 163		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
 164
 165		/*
 166		 * Pairs with the get in rdma_alloc_commit_uobject(), could
 167		 * destroy uobj.
 168		 */
 169		uverbs_uobject_put(uobj);
 170	}
 171
 172	/*
  173	 * When aborting, the stack kref remains owned by the core code, and is
 174	 * not transferred into the type. Pairs with the get in alloc_uobj
 175	 */
 176	if (reason == RDMA_REMOVE_ABORT)
 177		uverbs_uobject_put(uobj);
 178
 179	return 0;
 180}
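/*
 * Editorial recap (derived from the comments above, not upstream text) of
 * how uverbs_destroy_uobject() leaves the uobject per reason:
 *
 *	RDMA_REMOVE_DESTROY:	usecnt stays write locked (-1) and the handle
 *				is removed; the caller still owns a kref and
 *				must drop it via rdma_lookup_put_uobject()
 *				with UVERBS_LOOKUP_DESTROY.
 *	RDMA_REMOVE_ABORT:	hw_destroy_rwsem not required; alloc_abort()
 *				runs and the kref from alloc_uobj() is put.
 *	all other reasons:	usecnt is reset to 0 and the handle is left
 *				in place for release_ufile_idr_uobject().
 */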
 181
 182/*
 183 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 184 * sequence. It should only be used from command callbacks. On success the
 185 * caller must pair this with uobj_put_destroy(). This
 186 * version requires the caller to have already obtained an
 187 * LOOKUP_DESTROY uobject kref.
 188 */
 189int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
 190{
 191	struct ib_uverbs_file *ufile = attrs->ufile;
 192	int ret;
 193
 194	down_read(&ufile->hw_destroy_rwsem);
 195
 196	/*
 197	 * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
 198	 * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
 199	 * This is because any other concurrent thread can still see the object
 200	 * in the xarray due to RCU. Leaving it locked ensures nothing else will
 201	 * touch it.
 202	 */
 203	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
 204	if (ret)
 205		goto out_unlock;
 206
 207	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
 208	if (ret) {
 209		atomic_set(&uobj->usecnt, 0);
 210		goto out_unlock;
 211	}
 212
 213out_unlock:
 214	up_read(&ufile->hw_destroy_rwsem);
 215	return ret;
 216}
 217
 218/*
 219 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 220 * with a NULL object pointer. The caller must pair this with
 221 * uobj_put_destroy().
 222 */
 223struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
 224				      u32 id, struct uverbs_attr_bundle *attrs)
 225{
 226	struct ib_uobject *uobj;
 227	int ret;
 228
 229	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
 230				       UVERBS_LOOKUP_DESTROY, attrs);
 231	if (IS_ERR(uobj))
 232		return uobj;
 233
 234	ret = uobj_destroy(uobj, attrs);
 235	if (ret) {
 236		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
 237		return ERR_PTR(ret);
 238	}
 239
 240	return uobj;
 241}
 242
 243/*
 244 * Does both uobj_get_destroy() and uobj_put_destroy().  Returns 0 on success
 245 * (negative errno on failure). For use by callers that do not need the uobj.
 246 */
 247int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
 248			   struct uverbs_attr_bundle *attrs)
 249{
 250	struct ib_uobject *uobj;
 251
 252	uobj = __uobj_get_destroy(obj, id, attrs);
 253	if (IS_ERR(uobj))
 254		return PTR_ERR(uobj);
 255	uobj_put_destroy(uobj);
 256	return 0;
 257}
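/*
 * Illustrative sketch, not upstream code: the two destroy patterns the
 * helpers above give a command handler. The handler shape is hypothetical.
 */
static int __maybe_unused example_destroy_handler(const struct uverbs_api_object *obj,
						  u32 id,
						  struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	/* Variant 1: the uobj is still needed after the HW object is gone */
	uobj = __uobj_get_destroy(obj, id, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* ... e.g. copy uobj->user_handle into a response; object is NULL ... */

	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
	return 0;

	/*
	 * Variant 2: nothing to report back, one call does get + destroy +
	 * put:  return __uobj_perform_destroy(obj, id, attrs);
	 */
}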
 258
 259/* alloc_uobj must be undone by uverbs_destroy_uobject() */
 260static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
 261				     const struct uverbs_api_object *obj)
 262{
 263	struct ib_uverbs_file *ufile = attrs->ufile;
 264	struct ib_uobject *uobj;
 265
 266	if (!attrs->context) {
 267		struct ib_ucontext *ucontext =
 268			ib_uverbs_get_ucontext_file(ufile);
 269
 270		if (IS_ERR(ucontext))
 271			return ERR_CAST(ucontext);
 272		attrs->context = ucontext;
 273	}
 274
 275	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
 276	if (!uobj)
 277		return ERR_PTR(-ENOMEM);
 278	/*
  279	 * user_handle should be filled by the handler;
  280	 * the object is added to the list in the commit stage.
 281	 */
 282	uobj->ufile = ufile;
 283	uobj->context = attrs->context;
 284	INIT_LIST_HEAD(&uobj->list);
 285	uobj->uapi_object = obj;
 286	/*
 287	 * Allocated objects start out as write locked to deny any other
 288	 * syscalls from accessing them until they are committed. See
 289	 * rdma_alloc_commit_uobject
 290	 */
 291	atomic_set(&uobj->usecnt, -1);
 292	kref_init(&uobj->ref);
 293
 294	return uobj;
 295}
 296
 297static int idr_add_uobj(struct ib_uobject *uobj)
 298{
 299       /*
 300        * We start with allocating an idr pointing to NULL. This represents an
 301        * object which isn't initialized yet. We'll replace it later on with
 302        * the real object once we commit.
 303        */
 304	return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
 305			GFP_KERNEL);
 306}
 307
 308/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
 309static struct ib_uobject *
 310lookup_get_idr_uobject(const struct uverbs_api_object *obj,
 311		       struct ib_uverbs_file *ufile, s64 id,
 312		       enum rdma_lookup_mode mode)
 313{
 314	struct ib_uobject *uobj;
 315
 316	if (id < 0 || id > ULONG_MAX)
 317		return ERR_PTR(-EINVAL);
 318
 319	rcu_read_lock();
 320	/*
 321	 * The idr_find is guaranteed to return a pointer to something that
 322	 * isn't freed yet, or NULL, as the free after idr_remove goes through
 323	 * kfree_rcu(). However the object may still have been released and
 324	 * kfree() could be called at any time.
 325	 */
 326	uobj = xa_load(&ufile->idr, id);
 327	if (!uobj || !kref_get_unless_zero(&uobj->ref))
 328		uobj = ERR_PTR(-ENOENT);
 329	rcu_read_unlock();
 330	return uobj;
 331}
 332
 333static struct ib_uobject *
 334lookup_get_fd_uobject(const struct uverbs_api_object *obj,
 335		      struct ib_uverbs_file *ufile, s64 id,
 336		      enum rdma_lookup_mode mode)
 337{
 338	const struct uverbs_obj_fd_type *fd_type;
 339	struct file *f;
 340	struct ib_uobject *uobject;
 341	int fdno = id;
 342
 343	if (fdno != id)
 344		return ERR_PTR(-EINVAL);
 345
 346	if (mode != UVERBS_LOOKUP_READ)
 347		return ERR_PTR(-EOPNOTSUPP);
 348
 349	if (!obj->type_attrs)
 350		return ERR_PTR(-EIO);
 351	fd_type =
 352		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
 353
 354	f = fget(fdno);
 355	if (!f)
 356		return ERR_PTR(-EBADF);
 357
 358	uobject = f->private_data;
 359	/*
 360	 * fget(id) ensures we are not currently running
 361	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
 362	 * that release is never done while a call to lookup is possible.
 363	 */
 364	if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
 365		fput(f);
 366		return ERR_PTR(-EBADF);
 367	}
 368
 369	uverbs_uobject_get(uobject);
 370	return uobject;
 371}
 372
 373struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
 374					   struct ib_uverbs_file *ufile, s64 id,
 375					   enum rdma_lookup_mode mode,
 376					   struct uverbs_attr_bundle *attrs)
 377{
 378	struct ib_uobject *uobj;
 379	int ret;
 380
 381	if (obj == ERR_PTR(-ENOMSG)) {
 382		/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
 383		uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
 384		if (IS_ERR(uobj))
 385			return uobj;
 386	} else {
 387		if (IS_ERR(obj))
 388			return ERR_PTR(-EINVAL);
 389
 390		uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
 391		if (IS_ERR(uobj))
 392			return uobj;
 393
 394		if (uobj->uapi_object != obj) {
 395			ret = -EINVAL;
 396			goto free;
 397		}
 398	}
 399
 400	/*
  401	 * If we have been disassociated, block every command except for
 402	 * DESTROY based commands.
 403	 */
 404	if (mode != UVERBS_LOOKUP_DESTROY &&
 405	    !srcu_dereference(ufile->device->ib_dev,
 406			      &ufile->device->disassociate_srcu)) {
 407		ret = -EIO;
 408		goto free;
 409	}
 410
 411	ret = uverbs_try_lock_object(uobj, mode);
 412	if (ret)
 413		goto free;
 414	if (attrs)
 415		attrs->context = uobj->context;
 416
 417	return uobj;
 418free:
 419	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 420	uverbs_uobject_put(uobj);
 421	return ERR_PTR(ret);
 422}
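/*
 * Illustrative sketch, not upstream code: the canonical pairing for the
 * lookup above. The handler shape is hypothetical.
 */
static int __maybe_unused example_read_handler(const struct uverbs_api_object *obj,
					       u32 id,
					       struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
				       UVERBS_LOOKUP_READ, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* uobj->object is valid and shared-locked here */

	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
	return 0;
}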
 423
 424static struct ib_uobject *
 425alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
 426			struct uverbs_attr_bundle *attrs)
 427{
 428	int ret;
 429	struct ib_uobject *uobj;
 430
 431	uobj = alloc_uobj(attrs, obj);
 432	if (IS_ERR(uobj))
 433		return uobj;
 434
 435	ret = idr_add_uobj(uobj);
 436	if (ret)
 437		goto uobj_put;
 438
 439	ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
 440				   RDMACG_RESOURCE_HCA_OBJECT);
 441	if (ret)
 442		goto remove;
 443
 444	return uobj;
 445
 446remove:
 447	xa_erase(&attrs->ufile->idr, uobj->id);
 448uobj_put:
 449	uverbs_uobject_put(uobj);
 450	return ERR_PTR(ret);
 451}
 452
 453static struct ib_uobject *
 454alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
 455		       struct uverbs_attr_bundle *attrs)
 456{
 457	const struct uverbs_obj_fd_type *fd_type;
 458	int new_fd;
 459	struct ib_uobject *uobj, *ret;
 460	struct file *filp;
 461
 462	uobj = alloc_uobj(attrs, obj);
 463	if (IS_ERR(uobj))
 464		return uobj;
 465
 466	fd_type =
 467		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
 468	if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
 469		    fd_type->fops->release != &uverbs_async_event_release)) {
 470		ret = ERR_PTR(-EINVAL);
 471		goto err_fd;
 472	}
 473
 474	new_fd = get_unused_fd_flags(O_CLOEXEC);
 475	if (new_fd < 0) {
 476		ret = ERR_PTR(new_fd);
 477		goto err_fd;
 478	}
 479
 480	/* Note that uverbs_uobject_fd_release() is called during abort */
 481	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
 482				  fd_type->flags);
 483	if (IS_ERR(filp)) {
 484		ret = ERR_CAST(filp);
 485		goto err_getfile;
 486	}
 487	uobj->object = filp;
 488
 489	uobj->id = new_fd;
 490	return uobj;
 491
 492err_getfile:
 493	put_unused_fd(new_fd);
 494err_fd:
 495	uverbs_uobject_put(uobj);
 496	return ret;
 497}
 498
 499struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
 500					    struct uverbs_attr_bundle *attrs)
 501{
 502	struct ib_uverbs_file *ufile = attrs->ufile;
 503	struct ib_uobject *ret;
 504
 505	if (IS_ERR(obj))
 506		return ERR_PTR(-EINVAL);
 507
 508	/*
 509	 * The hw_destroy_rwsem is held across the entire object creation and
 510	 * released during rdma_alloc_commit_uobject or
 511	 * rdma_alloc_abort_uobject
 512	 */
 513	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
 514		return ERR_PTR(-EIO);
 515
 516	ret = obj->type_class->alloc_begin(obj, attrs);
 517	if (IS_ERR(ret)) {
 518		up_read(&ufile->hw_destroy_rwsem);
 519		return ret;
 520	}
 521	return ret;
 522}
 523
 524static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
 525{
 526	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
 527			   RDMACG_RESOURCE_HCA_OBJECT);
 528
 529	xa_erase(&uobj->ufile->idr, uobj->id);
 530}
 531
 532static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
 533					       enum rdma_remove_reason why,
 534					       struct uverbs_attr_bundle *attrs)
 535{
 536	const struct uverbs_obj_idr_type *idr_type =
 537		container_of(uobj->uapi_object->type_attrs,
 538			     struct uverbs_obj_idr_type, type);
 539	int ret = idr_type->destroy_object(uobj, why, attrs);
 540
 541	if (ret)
 542		return ret;
 543
 544	if (why == RDMA_REMOVE_ABORT)
 545		return 0;
 546
 547	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
 548			   RDMACG_RESOURCE_HCA_OBJECT);
 549
 550	return 0;
 551}
 552
 553static void remove_handle_idr_uobject(struct ib_uobject *uobj)
 554{
 555	xa_erase(&uobj->ufile->idr, uobj->id);
 556	/* Matches the kref in alloc_commit_idr_uobject */
 557	uverbs_uobject_put(uobj);
 558}
 559
 560static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
 561{
 562	struct file *filp = uobj->object;
 563
 564	fput(filp);
 565	put_unused_fd(uobj->id);
 566}
 567
 568static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
 569					      enum rdma_remove_reason why,
 570					      struct uverbs_attr_bundle *attrs)
 571{
 572	const struct uverbs_obj_fd_type *fd_type = container_of(
 573		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
 574
 575	fd_type->destroy_object(uobj, why);
 576	return 0;
 577}
 578
 579static void remove_handle_fd_uobject(struct ib_uobject *uobj)
 580{
 581}
 582
 583static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
 584{
 585	struct ib_uverbs_file *ufile = uobj->ufile;
 586	void *old;
 587
 588	/*
 589	 * We already allocated this IDR with a NULL object, so
 590	 * this shouldn't fail.
 591	 *
 592	 * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
 593	 * It will be put by remove_commit_idr_uobject()
 594	 */
 595	old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
 596	WARN_ON(old != NULL);
 597}
 598
 599static void swap_idr_uobjects(struct ib_uobject *obj_old,
 600			     struct ib_uobject *obj_new)
 601{
 602	struct ib_uverbs_file *ufile = obj_old->ufile;
 603	void *old;
 604
 605	/*
  606	 * New must be an object that has been allocated but not yet committed; this
  607	 * moves the pre-committed state to obj_old, new still must be committed.
 608	 */
 609	old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY,
 610			 GFP_KERNEL);
 611	if (WARN_ON(old != obj_old))
 612		return;
 613
 614	swap(obj_old->id, obj_new->id);
 615
 616	old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL);
 617	WARN_ON(old != NULL);
 618}
 619
 620static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
 621{
 622	int fd = uobj->id;
 623	struct file *filp = uobj->object;
 624
 625	/* Matching put will be done in uverbs_uobject_fd_release() */
 626	kref_get(&uobj->ufile->ref);
 627
 628	/* This shouldn't be used anymore. Use the file object instead */
 629	uobj->id = 0;
 630
 631	/*
  632	 * NOTE: Once we install the file we lose ownership of our kref on
 633	 * uobj. It will be put by uverbs_uobject_fd_release()
 634	 */
 635	filp->private_data = uobj;
 636	fd_install(fd, filp);
 637}
 638
 639/*
 640 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
 641 * caller can no longer assume uobj is valid. If this function fails it
  642 * destroys the uobject, including the attached HW object.
 643 */
 644void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 645			       struct uverbs_attr_bundle *attrs)
 646{
 647	struct ib_uverbs_file *ufile = attrs->ufile;
 648
 649	/* kref is held so long as the uobj is on the uobj list. */
 650	uverbs_uobject_get(uobj);
 651	spin_lock_irq(&ufile->uobjects_lock);
 652	list_add(&uobj->list, &ufile->uobjects);
 653	spin_unlock_irq(&ufile->uobjects_lock);
 654
 655	/* matches atomic_set(-1) in alloc_uobj */
 656	atomic_set(&uobj->usecnt, 0);
 657
 658	/* alloc_commit consumes the uobj kref */
 659	uobj->uapi_object->type_class->alloc_commit(uobj);
 660
 661	/* Matches the down_read in rdma_alloc_begin_uobject */
 662	up_read(&ufile->hw_destroy_rwsem);
 663}
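/*
 * Illustrative sketch, not upstream code: the begin/commit/abort lifecycle
 * around a HW allocation. The driver step and both names are hypothetical.
 */
static void *example_driver_alloc(struct uverbs_attr_bundle *attrs)
{
	return ERR_PTR(-ENOSYS);	/* stand-in for a real HW allocation */
}

static int __maybe_unused example_create_handler(const struct uverbs_api_object *obj,
						 struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	void *hw_obj;

	/* takes hw_destroy_rwsem for read; uobj starts write locked */
	uobj = rdma_alloc_begin_uobject(obj, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	hw_obj = example_driver_alloc(attrs);
	if (IS_ERR(hw_obj)) {
		/* undoes alloc_begin and drops the rwsem */
		rdma_alloc_abort_uobject(uobj, attrs, false);
		return PTR_ERR(hw_obj);
	}
	uobj->object = hw_obj;

	/* publishes the handle, unlocks usecnt, drops the rwsem */
	rdma_alloc_commit_uobject(uobj, attrs);
	return 0;
}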
 664
 665/*
 666 * new_uobj will be assigned to the handle currently used by to_uobj, and
 667 * to_uobj will be destroyed.
 668 *
 669 * Upon return the caller must do:
 670 *    rdma_alloc_commit_uobject(new_uobj)
 671 *    uobj_put_destroy(to_uobj)
 672 *
 673 * to_uobj must have a write get but the put mode switches to destroy once
 674 * this is called.
 675 */
 676void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj,
 677			struct uverbs_attr_bundle *attrs)
 678{
 679	assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE);
 680
 681	if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object ||
 682		    !to_uobj->uapi_object->type_class->swap_uobjects))
 683		return;
 684
 685	to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj);
 686
 687	/*
 688	 * If this fails then the uobject is still completely valid (though with
 689	 * a new ID) and we leak it until context close.
 690	 */
 691	uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs);
 692}
 693
 694/*
 695 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 696 * object and anything else connected to uobj before calling this.
 697 */
 698void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
 699			      struct uverbs_attr_bundle *attrs,
 700			      bool hw_obj_valid)
 701{
 702	struct ib_uverbs_file *ufile = uobj->ufile;
 703	int ret;
 704
 705	if (hw_obj_valid) {
 706		ret = uobj->uapi_object->type_class->destroy_hw(
 707			uobj, RDMA_REMOVE_ABORT, attrs);
 708		/*
 709		 * If the driver couldn't destroy the object then go ahead and
 710		 * commit it. Leaking objects that can't be destroyed is only
 711		 * done during FD close after the driver has a few more tries to
 712		 * destroy it.
 713		 */
 714		if (WARN_ON(ret))
 715			return rdma_alloc_commit_uobject(uobj, attrs);
 716	}
 717
 718	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
 719
 720	/* Matches the down_read in rdma_alloc_begin_uobject */
 721	up_read(&ufile->hw_destroy_rwsem);
 722}
 723
 724static void lookup_put_idr_uobject(struct ib_uobject *uobj,
 725				   enum rdma_lookup_mode mode)
 726{
 727}
 728
 729static void lookup_put_fd_uobject(struct ib_uobject *uobj,
 730				  enum rdma_lookup_mode mode)
 731{
 732	struct file *filp = uobj->object;
 733
 734	WARN_ON(mode != UVERBS_LOOKUP_READ);
 735	/*
  736	 * This indirectly calls uverbs_uobject_fd_release() and frees the
  737	 * object.
 738	 */
 739	fput(filp);
 740}
 741
 742void rdma_lookup_put_uobject(struct ib_uobject *uobj,
 743			     enum rdma_lookup_mode mode)
 744{
 745	assert_uverbs_usecnt(uobj, mode);
 746	/*
 747	 * In order to unlock an object, either decrease its usecnt for
 748	 * read access or zero it in case of exclusive access. See
 749	 * uverbs_try_lock_object for locking schema information.
 750	 */
 751	switch (mode) {
 752	case UVERBS_LOOKUP_READ:
 753		atomic_dec(&uobj->usecnt);
 754		break;
 755	case UVERBS_LOOKUP_WRITE:
 756		atomic_set(&uobj->usecnt, 0);
 757		break;
 758	case UVERBS_LOOKUP_DESTROY:
 759		break;
 760	}
 761
 762	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 763	/* Pairs with the kref obtained by type->lookup_get */
 764	uverbs_uobject_put(uobj);
 765}
 766
 767void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
 768{
 769	xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
 770}
 771
 772void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
 773{
 774	struct ib_uobject *entry;
 775	unsigned long id;
 776
 777	/*
 778	 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
 779	 * there are no HW objects left, however the xarray is still populated
 780	 * with anything that has not been cleaned up by userspace. Since the
 781	 * kref on ufile is 0, nothing is allowed to call lookup_get.
 782	 *
 783	 * This is an optimized equivalent to remove_handle_idr_uobject
 784	 */
 785	xa_for_each(&ufile->idr, id, entry) {
 786		WARN_ON(entry->object);
 787		uverbs_uobject_put(entry);
 788	}
 789
 790	xa_destroy(&ufile->idr);
 791}
 792
 793const struct uverbs_obj_type_class uverbs_idr_class = {
 794	.alloc_begin = alloc_begin_idr_uobject,
 795	.lookup_get = lookup_get_idr_uobject,
 796	.alloc_commit = alloc_commit_idr_uobject,
 797	.alloc_abort = alloc_abort_idr_uobject,
 798	.lookup_put = lookup_put_idr_uobject,
 799	.destroy_hw = destroy_hw_idr_uobject,
 800	.remove_handle = remove_handle_idr_uobject,
 801	.swap_uobjects = swap_idr_uobjects,
 802};
 803EXPORT_SYMBOL(uverbs_idr_class);
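/*
 * Illustrative sketch, not upstream code: the destroy callback an
 * IDR-backed type supplies so that uverbs_idr_class (above) can manage it.
 * The name is hypothetical; the UVERBS_TYPE_ALLOC_IDR() helper in
 * <rdma/uverbs_types.h> wraps such a callback into a type declaration.
 */
static int __maybe_unused example_destroy_object(struct ib_uobject *uobj,
						 enum rdma_remove_reason why,
						 struct uverbs_attr_bundle *attrs)
{
	/* tear down the HW object behind uobj->object here */
	return 0;
}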
 804
 805/*
 806 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
 807 * file_operations release method.
 808 */
 809int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
 810{
 811	struct ib_uverbs_file *ufile;
 812	struct ib_uobject *uobj;
 813
 814	/*
 815	 * This can only happen if the fput came from alloc_abort_fd_uobject()
 816	 */
 817	if (!filp->private_data)
 818		return 0;
 819	uobj = filp->private_data;
 820	ufile = uobj->ufile;
 821
 822	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
 823		struct uverbs_attr_bundle attrs = {
 824			.context = uobj->context,
 825			.ufile = ufile,
 826		};
 827
 828		/*
 829		 * lookup_get_fd_uobject holds the kref on the struct file any
 830		 * time a FD uobj is locked, which prevents this release
 831		 * method from being invoked. Meaning we can always get the
 832		 * write lock here, or we have a kernel bug.
 833		 */
 834		WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
 835		uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
 836		up_read(&ufile->hw_destroy_rwsem);
 837	}
 838
 839	/* Matches the get in alloc_commit_fd_uobject() */
 840	kref_put(&ufile->ref, ib_uverbs_release_file);
 841
 842	/* Pairs with filp->private_data in alloc_begin_fd_uobject */
 843	uverbs_uobject_put(uobj);
 844	return 0;
 845}
 846EXPORT_SYMBOL(uverbs_uobject_fd_release);
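/*
 * Illustrative sketch, not upstream code: an FD-backed type wires its
 * file_operations release hook to the function above;
 * alloc_begin_fd_uobject() enforces this with a WARN_ON. The name is
 * hypothetical.
 */
static const struct file_operations example_fd_fops __maybe_unused = {
	.release = uverbs_uobject_fd_release,
};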
 847
 848/*
 849 * Drop the ucontext off the ufile and completely disconnect it from the
 850 * ib_device
 851 */
 852static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 853				   enum rdma_remove_reason reason)
 854{
 855	struct ib_ucontext *ucontext = ufile->ucontext;
 856	struct ib_device *ib_dev = ucontext->device;
 857
 858	/*
 859	 * If we are closing the FD then the user mmap VMAs must have
 860	 * already been destroyed as they hold on to the filep, otherwise
 861	 * they need to be zap'd.
 862	 */
 863	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
 864		uverbs_user_mmap_disassociate(ufile);
 865		if (ib_dev->ops.disassociate_ucontext)
 866			ib_dev->ops.disassociate_ucontext(ucontext);
 867	}
 868
 869	ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
 870			   RDMACG_RESOURCE_HCA_HANDLE);
 871
 872	rdma_restrack_del(&ucontext->res);
 873
 874	ib_dev->ops.dealloc_ucontext(ucontext);
 875	WARN_ON(!xa_empty(&ucontext->mmap_xa));
 876	kfree(ucontext);
 877
 878	ufile->ucontext = NULL;
 879}
 880
 881static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
 882				  enum rdma_remove_reason reason)
 883{
 884	struct uverbs_attr_bundle attrs = { .ufile = ufile };
 885	struct ib_ucontext *ucontext = ufile->ucontext;
 886	struct ib_device *ib_dev = ucontext->device;
 887	struct ib_uobject *obj, *next_obj;
 888	int ret = -EINVAL;
 889
 890	if (ib_dev->ops.ufile_hw_cleanup)
 891		ib_dev->ops.ufile_hw_cleanup(ufile);
 892
 893	/*
 894	 * This shouldn't run while executing other commands on this
 895	 * context. Thus, the only thing we should take care of is
 896	 * releasing a FD while traversing this list. The FD could be
 897	 * closed and released from the _release fop of this FD.
 898	 * In order to mitigate this, we add a lock.
 899	 * We take and release the lock per traversal in order to let
  900	 * other threads (which might still use the FDs) a chance to run.
 901	 */
 902	list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
 903		attrs.context = obj->context;
 904		/*
 905		 * if we hit this WARN_ON, that means we are
 906		 * racing with a lookup_get.
 907		 */
 908		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
 909		if (reason == RDMA_REMOVE_DRIVER_FAILURE)
 910			obj->object = NULL;
 911		if (!uverbs_destroy_uobject(obj, reason, &attrs))
 912			ret = 0;
 913		else
 914			atomic_set(&obj->usecnt, 0);
 915	}
 916
 917	if (reason == RDMA_REMOVE_DRIVER_FAILURE) {
 918		WARN_ON(!list_empty(&ufile->uobjects));
 919		return 0;
 920	}
 921	return ret;
 922}
 923
 924/*
 925 * Destroy the ucontext and every uobject associated with it.
 926 *
 927 * This is internally locked and can be called in parallel from multiple
 928 * contexts.
 929 */
 930void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
 931			     enum rdma_remove_reason reason)
 932{
 933	down_write(&ufile->hw_destroy_rwsem);
 934
 935	/*
 936	 * If a ucontext was never created then we can't have any uobjects to
 937	 * cleanup, nothing to do.
 938	 */
 939	if (!ufile->ucontext)
 940		goto done;
 941
 942	while (!list_empty(&ufile->uobjects) &&
 943	       !__uverbs_cleanup_ufile(ufile, reason)) {
 944	}
 945
 946	if (WARN_ON(!list_empty(&ufile->uobjects)))
 947		__uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE);
 948	ufile_destroy_ucontext(ufile, reason);
 949
 950done:
 951	up_write(&ufile->hw_destroy_rwsem);
 952}
 953
 954const struct uverbs_obj_type_class uverbs_fd_class = {
 955	.alloc_begin = alloc_begin_fd_uobject,
 956	.lookup_get = lookup_get_fd_uobject,
 957	.alloc_commit = alloc_commit_fd_uobject,
 958	.alloc_abort = alloc_abort_fd_uobject,
 959	.lookup_put = lookup_put_fd_uobject,
 960	.destroy_hw = destroy_hw_fd_uobject,
 961	.remove_handle = remove_handle_fd_uobject,
 962};
 963EXPORT_SYMBOL(uverbs_fd_class);
 964
 965struct ib_uobject *
 966uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
 967			     s64 id, struct uverbs_attr_bundle *attrs)
 968{
 969	const struct uverbs_api_object *obj =
 970		uapi_get_object(attrs->ufile->device->uapi, object_id);
 971
 972	switch (access) {
 973	case UVERBS_ACCESS_READ:
 974		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
 975					       UVERBS_LOOKUP_READ, attrs);
 976	case UVERBS_ACCESS_DESTROY:
 977		/* Actual destruction is done inside uverbs_handle_method */
 978		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
 979					       UVERBS_LOOKUP_DESTROY, attrs);
 980	case UVERBS_ACCESS_WRITE:
 981		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
 982					       UVERBS_LOOKUP_WRITE, attrs);
 983	case UVERBS_ACCESS_NEW:
 984		return rdma_alloc_begin_uobject(obj, attrs);
 985	default:
 986		WARN_ON(true);
 987		return ERR_PTR(-EOPNOTSUPP);
 988	}
 989}
 990
 991void uverbs_finalize_object(struct ib_uobject *uobj,
 992			    enum uverbs_obj_access access, bool hw_obj_valid,
 993			    bool commit, struct uverbs_attr_bundle *attrs)
 994{
 995	/*
 996	 * refcounts should be handled at the object level and not at the
 997	 * uobject level. Refcounts of the objects themselves are done in
 998	 * handlers.
 999	 */
1000
1001	switch (access) {
1002	case UVERBS_ACCESS_READ:
1003		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
1004		break;
1005	case UVERBS_ACCESS_WRITE:
1006		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
1007		break;
1008	case UVERBS_ACCESS_DESTROY:
1009		if (uobj)
1010			rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
1011		break;
1012	case UVERBS_ACCESS_NEW:
1013		if (commit)
1014			rdma_alloc_commit_uobject(uobj, attrs);
1015		else
1016			rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
1017		break;
1018	default:
1019		WARN_ON(true);
1020	}
1021}
v4.17
  1/*
  2 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 */
 32
 33#include <linux/file.h>
 34#include <linux/anon_inodes.h>
 35#include <rdma/ib_verbs.h>
 36#include <rdma/uverbs_types.h>
 37#include <linux/rcupdate.h>
 38#include <rdma/uverbs_ioctl.h>
 39#include <rdma/rdma_user_ioctl.h>
 40#include "uverbs.h"
 41#include "core_priv.h"
 42#include "rdma_core.h"
 43
 44int uverbs_ns_idx(u16 *id, unsigned int ns_count)
 45{
 46	int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;
 47
 48	if (ret >= ns_count)
 49		return -EINVAL;
 50
 51	*id &= ~UVERBS_ID_NS_MASK;
 52	return ret;
 53}
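/*
 * Worked example (editorial; assumes the v4.17 values UVERBS_ID_NS_MASK ==
 * 0xF000 and UVERBS_ID_NS_SHIFT == 12):
 *
 *	u16 id = 0x1003;
 *	int ns = uverbs_ns_idx(&id, 2);
 *	// ns == 1 (namespace bucket), id == 0x0003 (index inside the bucket)
 */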
 54
 55const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
 56						   uint16_t object)
 57{
 58	const struct uverbs_root_spec *object_hash = ibdev->specs_root;
 59	const struct uverbs_object_spec_hash *objects;
 60	int ret = uverbs_ns_idx(&object, object_hash->num_buckets);
 61
 62	if (ret < 0)
 63		return NULL;
 64
 65	objects = object_hash->object_buckets[ret];
 66
 67	if (object >= objects->num_objects)
 68		return NULL;
 69
 70	return objects->objects[object];
 71}
 72
 73const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
 74						   uint16_t method)
 75{
 76	const struct uverbs_method_spec_hash *methods;
 77	int ret = uverbs_ns_idx(&method, object->num_buckets);
 78
 79	if (ret < 0)
 80		return NULL;
 81
 82	methods = object->method_buckets[ret];
 83	if (method >= methods->num_methods)
 84		return NULL;
 85
 86	return methods->methods[method];
 87}
 88
 89void uverbs_uobject_get(struct ib_uobject *uobject)
 90{
 91	kref_get(&uobject->ref);
 92}
 93
 94static void uverbs_uobject_free(struct kref *ref)
 95{
 96	struct ib_uobject *uobj =
 97		container_of(ref, struct ib_uobject, ref);
 98
 99	if (uobj->type->type_class->needs_kfree_rcu)
100		kfree_rcu(uobj, rcu);
101	else
102		kfree(uobj);
103}
104
105void uverbs_uobject_put(struct ib_uobject *uobject)
106{
107	kref_put(&uobject->ref, uverbs_uobject_free);
108}
109
110static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
111{
112	/*
113	 * When a shared access is required, we use a positive counter. Each
114	 * shared access request checks that the value != -1 and increment it.
115	 * Exclusive access is required for operations like write or destroy.
116	 * In exclusive access mode, we check that the counter is zero (nobody
117	 * claimed this object) and we set it to -1. Releasing a shared access
118	 * lock is done simply by decreasing the counter. As for exclusive
119	 * access locks, since only a single one of them is allowed
120	 * concurrently, setting the counter to zero is enough for releasing
121	 * this lock.
122	 */
123	if (!exclusive)
124		return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
125			-EBUSY : 0;
126
127	/* lock is either WRITE or DESTROY - should be exclusive */
128	return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
129}
130
131static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
132				     const struct uverbs_obj_type *type)
133{
134	struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
135
136	if (!uobj)
137		return ERR_PTR(-ENOMEM);
138	/*
139	 * user_handle should be filled by the handler;
140	 * the object is added to the list in the commit stage.
141	 */
142	uobj->context = context;
143	uobj->type = type;
144	/*
145	 * Allocated objects start out as write locked to deny any other
146	 * syscalls from accessing them until they are committed. See
147	 * rdma_alloc_commit_uobject
148	 */
149	atomic_set(&uobj->usecnt, -1);
150	kref_init(&uobj->ref);
151
152	return uobj;
153}
154
155static int idr_add_uobj(struct ib_uobject *uobj)
156{
157	int ret;
158
159	idr_preload(GFP_KERNEL);
160	spin_lock(&uobj->context->ufile->idr_lock);
161
162	/*
163	 * We start with allocating an idr pointing to NULL. This represents an
164	 * object which isn't initialized yet. We'll replace it later on with
165	 * the real object once we commit.
166	 */
167	ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
168			min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
169	if (ret >= 0)
170		uobj->id = ret;
171
172	spin_unlock(&uobj->context->ufile->idr_lock);
173	idr_preload_end();
174
175	return ret < 0 ? ret : 0;
176}
177
178/*
179 * It only removes it from the uobjects list, uverbs_uobject_put() is still
180 * required.
181 */
182static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
183{
184	spin_lock(&uobj->context->ufile->idr_lock);
185	idr_remove(&uobj->context->ufile->idr, uobj->id);
186	spin_unlock(&uobj->context->ufile->idr_lock);
187}
188
189/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
190static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
191						 struct ib_ucontext *ucontext,
192						 int id, bool exclusive)
193{
194	struct ib_uobject *uobj;
195
196	rcu_read_lock();
197	/* object won't be released as we're protected by RCU */
198	uobj = idr_find(&ucontext->ufile->idr, id);
199	if (!uobj) {
200		uobj = ERR_PTR(-ENOENT);
201		goto free;
202	}
203
204	/*
205	 * The idr_find is guaranteed to return a pointer to something that
206	 * isn't freed yet, or NULL, as the free after idr_remove goes through
207	 * kfree_rcu(). However the object may still have been released and
208	 * kfree() could be called at any time.
209	 */
210	if (!kref_get_unless_zero(&uobj->ref))
211		uobj = ERR_PTR(-ENOENT);
212
213free:
214	rcu_read_unlock();
215	return uobj;
216}
217
218static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
219						struct ib_ucontext *ucontext,
220						int id, bool exclusive)
221{
222	struct file *f;
223	struct ib_uobject *uobject;
224	const struct uverbs_obj_fd_type *fd_type =
225		container_of(type, struct uverbs_obj_fd_type, type);
226
227	if (exclusive)
228		return ERR_PTR(-EOPNOTSUPP);
229
230	f = fget(id);
231	if (!f)
232		return ERR_PTR(-EBADF);
233
234	uobject = f->private_data;
235	/*
236	 * fget(id) ensures we are not currently running uverbs_close_fd,
237	 * and the caller is expected to ensure that uverbs_close_fd is never
238	 * done while a call to lookup is possible.
239	 */
240	if (f->f_op != fd_type->fops) {
241		fput(f);
242		return ERR_PTR(-EBADF);
243	}
244
245	uverbs_uobject_get(uobject);
246	return uobject;
247}
248
249struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
250					   struct ib_ucontext *ucontext,
251					   int id, bool exclusive)
252{
253	struct ib_uobject *uobj;
254	int ret;
255
256	uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
257	if (IS_ERR(uobj))
258		return uobj;
259
260	if (uobj->type != type) {
261		ret = -EINVAL;
262		goto free;
263	}
264
265	ret = uverbs_try_lock_object(uobj, exclusive);
266	if (ret) {
267		WARN(ucontext->cleanup_reason,
268		     "ib_uverbs: Trying to lookup_get while cleanup context\n");
269		goto free;
270	}
271
272	return uobj;
273free:
274	uobj->type->type_class->lookup_put(uobj, exclusive);
275	uverbs_uobject_put(uobj);
276	return ERR_PTR(ret);
277}
278
279static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
280						  struct ib_ucontext *ucontext)
281{
282	int ret;
283	struct ib_uobject *uobj;
284
285	uobj = alloc_uobj(ucontext, type);
286	if (IS_ERR(uobj))
287		return uobj;
288
289	ret = idr_add_uobj(uobj);
290	if (ret)
291		goto uobj_put;
292
293	ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
294				   RDMACG_RESOURCE_HCA_OBJECT);
295	if (ret)
296		goto idr_remove;
297
298	return uobj;
299
300idr_remove:
301	uverbs_idr_remove_uobj(uobj);
302uobj_put:
303	uverbs_uobject_put(uobj);
304	return ERR_PTR(ret);
305}
306
307static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
308						 struct ib_ucontext *ucontext)
309{
310	const struct uverbs_obj_fd_type *fd_type =
311		container_of(type, struct uverbs_obj_fd_type, type);
312	int new_fd;
313	struct ib_uobject *uobj;
314	struct ib_uobject_file *uobj_file;
315	struct file *filp;
316
317	new_fd = get_unused_fd_flags(O_CLOEXEC);
318	if (new_fd < 0)
319		return ERR_PTR(new_fd);
320
321	uobj = alloc_uobj(ucontext, type);
322	if (IS_ERR(uobj)) {
323		put_unused_fd(new_fd);
324		return uobj;
325	}
326
327	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
328	filp = anon_inode_getfile(fd_type->name,
329				  fd_type->fops,
330				  uobj_file,
331				  fd_type->flags);
332	if (IS_ERR(filp)) {
333		put_unused_fd(new_fd);
334		uverbs_uobject_put(uobj);
335		return (void *)filp;
336	}
337
338	uobj_file->uobj.id = new_fd;
339	uobj_file->uobj.object = filp;
340	uobj_file->ufile = ucontext->ufile;
341	INIT_LIST_HEAD(&uobj->list);
342	kref_get(&uobj_file->ufile->ref);
343
344	return uobj;
345}
346
347struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
348					    struct ib_ucontext *ucontext)
349{
350	return type->type_class->alloc_begin(type, ucontext);
351}
352
353static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
354						  enum rdma_remove_reason why)
355{
356	const struct uverbs_obj_idr_type *idr_type =
357		container_of(uobj->type, struct uverbs_obj_idr_type,
358			     type);
359	int ret = idr_type->destroy_object(uobj, why);
360
361	/*
362	 * We can only fail gracefully if the user requested to destroy the
363	 * object. In the rest of the cases, just remove whatever you can.
364	 */
365	if (why == RDMA_REMOVE_DESTROY && ret)
366		return ret;
367
368	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
369			   RDMACG_RESOURCE_HCA_OBJECT);
370	uverbs_idr_remove_uobj(uobj);
371
372	return ret;
373}
374
375static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
376{
377	struct ib_uobject_file *uobj_file =
378		container_of(uobj, struct ib_uobject_file, uobj);
379	struct file *filp = uobj->object;
380	int id = uobj_file->uobj.id;
381
382	/* Unsuccessful NEW */
383	fput(filp);
384	put_unused_fd(id);
385}
386
387static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
388						 enum rdma_remove_reason why)
389{
390	const struct uverbs_obj_fd_type *fd_type =
391		container_of(uobj->type, struct uverbs_obj_fd_type, type);
392	struct ib_uobject_file *uobj_file =
393		container_of(uobj, struct ib_uobject_file, uobj);
394	int ret = fd_type->context_closed(uobj_file, why);
395
396	if (why == RDMA_REMOVE_DESTROY && ret)
397		return ret;
398
399	if (why == RDMA_REMOVE_DURING_CLEANUP) {
400		alloc_abort_fd_uobject(uobj);
401		return ret;
402	}
403
404	uobj_file->uobj.context = NULL;
405	return ret;
406}
407
408static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
409{
410#ifdef CONFIG_LOCKDEP
411	if (exclusive)
412		WARN_ON(atomic_read(&uobj->usecnt) != -1);
413	else
414		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
415#endif
416}
417
418static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
419						    enum rdma_remove_reason why)
420{
421	int ret;
422	struct ib_ucontext *ucontext = uobj->context;
423
424	ret = uobj->type->type_class->remove_commit(uobj, why);
425	if (ret && why == RDMA_REMOVE_DESTROY) {
426		/* We couldn't remove the object, so just unlock the uobject */
427		atomic_set(&uobj->usecnt, 0);
428		uobj->type->type_class->lookup_put(uobj, true);
429	} else {
430		mutex_lock(&ucontext->uobjects_lock);
431		list_del(&uobj->list);
432		mutex_unlock(&ucontext->uobjects_lock);
433		/* put the ref we took when we created the object */
434		uverbs_uobject_put(uobj);
435	}
436
437	return ret;
438}
439
440/* This is called only for user requested DESTROY reasons */
441int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
442{
443	int ret;
444	struct ib_ucontext *ucontext = uobj->context;
445
446	/* put the ref count we took at lookup_get */
447	uverbs_uobject_put(uobj);
448	/* Cleanup is running. Calling this should have been impossible */
449	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
450		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
451		return 0;
452	}
453	assert_uverbs_usecnt(uobj, true);
454	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
455
456	up_read(&ucontext->cleanup_rwsem);
457	return ret;
458}
459
460static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
461					     enum rdma_remove_reason why)
462{
463	return 0;
464}
465
466static const struct uverbs_obj_type null_obj_type = {
467	.type_class = &((const struct uverbs_obj_type_class){
468			.remove_commit = null_obj_type_class_remove_commit,
469			/* be cautious */
470			.needs_kfree_rcu = true}),
471};
472
473int rdma_explicit_destroy(struct ib_uobject *uobject)
474{
475	int ret;
476	struct ib_ucontext *ucontext = uobject->context;
477
478	/* Cleanup is running. Calling this should have been impossible */
479	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
480		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
481		return 0;
482	}
483	assert_uverbs_usecnt(uobject, true);
484	ret = uobject->type->type_class->remove_commit(uobject,
485						       RDMA_REMOVE_DESTROY);
486	if (ret)
487		goto out;
488
489	uobject->type = &null_obj_type;
490
491out:
492	up_read(&ucontext->cleanup_rwsem);
493	return ret;
494}
495
496static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
497{
498	spin_lock(&uobj->context->ufile->idr_lock);
499	/*
500	 * We already allocated this IDR with a NULL object, so
501	 * this shouldn't fail.
502	 */
503	WARN_ON(idr_replace(&uobj->context->ufile->idr,
504			    uobj, uobj->id));
505	spin_unlock(&uobj->context->ufile->idr_lock);
506}
507
508static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
509{
510	struct ib_uobject_file *uobj_file =
511		container_of(uobj, struct ib_uobject_file, uobj);
512
513	fd_install(uobj_file->uobj.id, uobj->object);
514	/* This shouldn't be used anymore. Use the file object instead */
515	uobj_file->uobj.id = 0;
516	/* Get another reference as we export this to the fops */
517	uverbs_uobject_get(&uobj_file->uobj);
518}
519
520int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
521{
522	/* Cleanup is running. Calling this should have been impossible */
523	if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
524		int ret;
525
526		WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
527		ret = uobj->type->type_class->remove_commit(uobj,
528							    RDMA_REMOVE_DURING_CLEANUP);
529		if (ret)
530			pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
531				uobj->id);
532		return ret;
533	}
534
535	/* matches atomic_set(-1) in alloc_uobj */
536	assert_uverbs_usecnt(uobj, true);
537	atomic_set(&uobj->usecnt, 0);
538
539	mutex_lock(&uobj->context->uobjects_lock);
540	list_add(&uobj->list, &uobj->context->uobjects);
541	mutex_unlock(&uobj->context->uobjects_lock);
542
543	uobj->type->type_class->alloc_commit(uobj);
544	up_read(&uobj->context->cleanup_rwsem);
545
546	return 0;
547}
548
549static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
550{
551	uverbs_idr_remove_uobj(uobj);
552	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
553			   RDMACG_RESOURCE_HCA_OBJECT);
554	uverbs_uobject_put(uobj);
555}
556
557void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
558{
559	uobj->type->type_class->alloc_abort(uobj);
560}
561
562static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
563{
564}
565
566static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
567{
568	struct file *filp = uobj->object;
569
570	WARN_ON(exclusive);
571	/* This indirectly calls uverbs_close_fd and frees the object */
572	fput(filp);
573}
574
575void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
576{
577	assert_uverbs_usecnt(uobj, exclusive);
578	uobj->type->type_class->lookup_put(uobj, exclusive);
579	/*
580	 * In order to unlock an object, either decrease its usecnt for
581	 * read access or zero it in case of exclusive access. See
582	 * uverbs_try_lock_object for locking schema information.
583	 */
584	if (!exclusive)
585		atomic_dec(&uobj->usecnt);
586	else
587		atomic_set(&uobj->usecnt, 0);
588
589	uverbs_uobject_put(uobj);
590}
591
592const struct uverbs_obj_type_class uverbs_idr_class = {
593	.alloc_begin = alloc_begin_idr_uobject,
594	.lookup_get = lookup_get_idr_uobject,
595	.alloc_commit = alloc_commit_idr_uobject,
596	.alloc_abort = alloc_abort_idr_uobject,
597	.lookup_put = lookup_put_idr_uobject,
598	.remove_commit = remove_commit_idr_uobject,
599	/*
600	 * When we destroy an object, we first just lock it for WRITE and
601	 * actually DESTROY it in the finalize stage. So, the problematic
602	 * scenario is when we just started the finalize stage of the
603	 * destruction (nothing was executed yet). Now, the other thread
604	 * fetched the object for READ access, but it didn't lock it yet.
605	 * The DESTROY thread continues and starts destroying the object.
606	 * When the other thread continues - without the RCU, it would
607	 * access freed memory. However, the rcu_read_lock delays the free
608	 * until the rcu_read_lock of the READ operation quits. Since the
609	 * exclusive lock of the object is still taken by the DESTROY flow, the
610	 * READ operation will get -EBUSY and it'll just bail out.
611	 */
612	.needs_kfree_rcu = true,
613};
614
615static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
616{
617	struct ib_ucontext *ucontext;
618	struct ib_uverbs_file *ufile = uobj_file->ufile;
619	int ret;
620
621	mutex_lock(&uobj_file->ufile->cleanup_mutex);
622
623	/* uobject was either already cleaned up or is being cleaned up right now anyway */
624	if (!uobj_file->uobj.context ||
625	    !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
626		goto unlock;
627
628	ucontext = uobj_file->uobj.context;
629	ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
630	up_read(&ucontext->cleanup_rwsem);
631	if (ret)
632		pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
633unlock:
634	mutex_unlock(&ufile->cleanup_mutex);
635}
636
637void uverbs_close_fd(struct file *f)
638{
639	struct ib_uobject_file *uobj_file = f->private_data;
640	struct kref *uverbs_file_ref = &uobj_file->ufile->ref;
641
642	_uverbs_close_fd(uobj_file);
643	uverbs_uobject_put(&uobj_file->uobj);
644	kref_put(uverbs_file_ref, ib_uverbs_release_file);
645}
646
647void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
648{
649	enum rdma_remove_reason reason = device_removed ?
650		RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
651	unsigned int cur_order = 0;
652
653	ucontext->cleanup_reason = reason;
654	/*
655	 * Waits for all remove_commit and alloc_commit to finish. Logically, we
656	 * want to hold this forever as the context is going to be destroyed,
657	 * but we'll release it since it causes a "held lock freed" BUG message.
658	 */
659	down_write(&ucontext->cleanup_rwsem);
660
661	while (!list_empty(&ucontext->uobjects)) {
662		struct ib_uobject *obj, *next_obj;
663		unsigned int next_order = UINT_MAX;
664
665		/*
666		 * This shouldn't run while executing other commands on this
667		 * context. Thus, the only thing we should take care of is
668		 * releasing a FD while traversing this list. The FD could be
669		 * closed and released from the _release fop of this FD.
670		 * In order to mitigate this, we add a lock.
671		 * We take and release the lock per order traversal in order
672	 * to let other threads (which might still use the FDs) a chance
673	 * to run.
674		 */
675		mutex_lock(&ucontext->uobjects_lock);
676		list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
677					 list) {
678			if (obj->type->destroy_order == cur_order) {
679				int ret;
680
681				/*
682				 * if we hit this WARN_ON, that means we are
683				 * racing with a lookup_get.
684				 */
685				WARN_ON(uverbs_try_lock_object(obj, true));
686				ret = obj->type->type_class->remove_commit(obj,
687									   reason);
688				list_del(&obj->list);
689				if (ret)
690					pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
691						obj->id, cur_order);
692				/* put the ref we took when we created the object */
693				uverbs_uobject_put(obj);
694			} else {
695				next_order = min(next_order,
696						 obj->type->destroy_order);
697			}
698		}
699		mutex_unlock(&ucontext->uobjects_lock);
700		cur_order = next_order;
701	}
702	up_write(&ucontext->cleanup_rwsem);
703}
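/*
 * Editorial note, not upstream text: destroy_order makes the loop above a
 * priority sweep, lowest order first. With hypothetical types
 *
 *	cq_type.destroy_order = 0;	// torn down in the first pass
 *	pd_type.destroy_order = 1;	// torn down only after its users
 *
 * the first pass removes every order-0 object and computes next_order = 1;
 * the second pass then removes the PDs.
 */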
704
705void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
706{
707	ucontext->cleanup_reason = 0;
708	mutex_init(&ucontext->uobjects_lock);
709	INIT_LIST_HEAD(&ucontext->uobjects);
710	init_rwsem(&ucontext->cleanup_rwsem);
711}
712
713const struct uverbs_obj_type_class uverbs_fd_class = {
714	.alloc_begin = alloc_begin_fd_uobject,
715	.lookup_get = lookup_get_fd_uobject,
716	.alloc_commit = alloc_commit_fd_uobject,
717	.alloc_abort = alloc_abort_fd_uobject,
718	.lookup_put = lookup_put_fd_uobject,
719	.remove_commit = remove_commit_fd_uobject,
720	.needs_kfree_rcu = false,
721};
722
723struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
724						   struct ib_ucontext *ucontext,
725						   enum uverbs_obj_access access,
726						   int id)
727{
728	switch (access) {
729	case UVERBS_ACCESS_READ:
730		return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
731	case UVERBS_ACCESS_DESTROY:
732	case UVERBS_ACCESS_WRITE:
733		return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
734	case UVERBS_ACCESS_NEW:
735		return rdma_alloc_begin_uobject(type_attrs, ucontext);
736	default:
737		WARN_ON(true);
738		return ERR_PTR(-EOPNOTSUPP);
739	}
740}
741
742int uverbs_finalize_object(struct ib_uobject *uobj,
743			   enum uverbs_obj_access access,
744			   bool commit)
745{
746	int ret = 0;
747
748	/*
749	 * refcounts should be handled at the object level and not at the
750	 * uobject level. Refcounts of the objects themselves are done in
751	 * handlers.
752	 */
753
754	switch (access) {
755	case UVERBS_ACCESS_READ:
756		rdma_lookup_put_uobject(uobj, false);
757		break;
758	case UVERBS_ACCESS_WRITE:
759		rdma_lookup_put_uobject(uobj, true);
760		break;
761	case UVERBS_ACCESS_DESTROY:
762		if (commit)
763			ret = rdma_remove_commit_uobject(uobj);
764		else
765			rdma_lookup_put_uobject(uobj, true);
766		break;
767	case UVERBS_ACCESS_NEW:
768		if (commit)
769			ret = rdma_alloc_commit_uobject(uobj);
770		else
771			rdma_alloc_abort_uobject(uobj);
772		break;
773	default:
774		WARN_ON(true);
775		ret = -EOPNOTSUPP;
776	}
777
778	return ret;
779}
780
781int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
782			    struct uverbs_attr_spec_hash * const *spec_hash,
783			    size_t num,
784			    bool commit)
785{
786	unsigned int i;
787	int ret = 0;
788
789	for (i = 0; i < num; i++) {
790		struct uverbs_attr_bundle_hash *curr_bundle =
791			&attrs_bundle->hash[i];
792		const struct uverbs_attr_spec_hash *curr_spec_bucket =
793			spec_hash[i];
794		unsigned int j;
795
796		for (j = 0; j < curr_bundle->num_attrs; j++) {
797			struct uverbs_attr *attr;
798			const struct uverbs_attr_spec *spec;
799
800			if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
801				continue;
802
803			attr = &curr_bundle->attrs[j];
804			spec = &curr_spec_bucket->attrs[j];
805
806			if (spec->type == UVERBS_ATTR_TYPE_IDR ||
807			    spec->type == UVERBS_ATTR_TYPE_FD) {
808				int current_ret;
809
810				current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
811								     spec->obj.access,
812								     commit);
813				if (!ret)
814					ret = current_ret;
815			}
816		}
817	}
818	return ret;
819}