Loading...
1/*
2 * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/file.h>
34#include <linux/anon_inodes.h>
35#include <linux/sched/mm.h>
36#include <rdma/ib_verbs.h>
37#include <rdma/uverbs_types.h>
38#include <linux/rcupdate.h>
39#include <rdma/uverbs_ioctl.h>
40#include <rdma/rdma_user_ioctl.h>
41#include "uverbs.h"
42#include "core_priv.h"
43#include "rdma_core.h"
44
45static void uverbs_uobject_free(struct kref *ref)
46{
47 kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
48}
49
50/*
51 * In order to indicate we no longer needs this uobject, uverbs_uobject_put
52 * is called. When the reference count is decreased, the uobject is freed.
53 * For example, this is used when attaching a completion channel to a CQ.
54 */
55void uverbs_uobject_put(struct ib_uobject *uobject)
56{
57 kref_put(&uobject->ref, uverbs_uobject_free);
58}
59EXPORT_SYMBOL(uverbs_uobject_put);
60
61static int uverbs_try_lock_object(struct ib_uobject *uobj,
62 enum rdma_lookup_mode mode)
63{
64 /*
65 * When a shared access is required, we use a positive counter. Each
66 * shared access request checks that the value != -1 and increment it.
67 * Exclusive access is required for operations like write or destroy.
68 * In exclusive access mode, we check that the counter is zero (nobody
69 * claimed this object) and we set it to -1. Releasing a shared access
70 * lock is done simply by decreasing the counter. As for exclusive
71 * access locks, since only a single one of them is allowed
72 * concurrently, setting the counter to zero is enough for releasing
73 * this lock.
74 */
75 switch (mode) {
76 case UVERBS_LOOKUP_READ:
77 return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
78 -EBUSY : 0;
79 case UVERBS_LOOKUP_WRITE:
80 /* lock is exclusive */
81 return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
82 case UVERBS_LOOKUP_DESTROY:
83 return 0;
84 }
85 return 0;
86}
87
88static void assert_uverbs_usecnt(struct ib_uobject *uobj,
89 enum rdma_lookup_mode mode)
90{
91#ifdef CONFIG_LOCKDEP
92 switch (mode) {
93 case UVERBS_LOOKUP_READ:
94 WARN_ON(atomic_read(&uobj->usecnt) <= 0);
95 break;
96 case UVERBS_LOOKUP_WRITE:
97 WARN_ON(atomic_read(&uobj->usecnt) != -1);
98 break;
99 case UVERBS_LOOKUP_DESTROY:
100 break;
101 }
102#endif
103}
104
105/*
106 * This must be called with the hw_destroy_rwsem locked for read or write,
107 * also the uobject itself must be locked for write.
108 *
109 * Upon return the HW object is guaranteed to be destroyed.
110 *
111 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
112 * however the type's allocat_commit function cannot have been called and the
113 * uobject cannot be on the uobjects_lists
114 *
115 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
116 * rdma_lookup_get_uobject) and the object is left in a state where the caller
117 * needs to call rdma_lookup_put_uobject.
118 *
119 * For all other destroy modes this function internally unlocks the uobject
120 * and consumes the kref on the uobj.
121 */
122static int uverbs_destroy_uobject(struct ib_uobject *uobj,
123 enum rdma_remove_reason reason,
124 struct uverbs_attr_bundle *attrs)
125{
126 struct ib_uverbs_file *ufile = attrs->ufile;
127 unsigned long flags;
128 int ret;
129
130 lockdep_assert_held(&ufile->hw_destroy_rwsem);
131 assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
132
133 if (reason == RDMA_REMOVE_ABORT) {
134 WARN_ON(!list_empty(&uobj->list));
135 WARN_ON(!uobj->context);
136 uobj->uapi_object->type_class->alloc_abort(uobj);
137 } else if (uobj->object) {
138 ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
139 attrs);
140 if (ret)
141 /* Nothing to be done, wait till ucontext will clean it */
142 return ret;
143
144 uobj->object = NULL;
145 }
146
147 uobj->context = NULL;
148
149 /*
150 * For DESTROY the usecnt is not changed, the caller is expected to
151 * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
152 * handle.
153 */
154 if (reason != RDMA_REMOVE_DESTROY)
155 atomic_set(&uobj->usecnt, 0);
156 else
157 uobj->uapi_object->type_class->remove_handle(uobj);
158
159 if (!list_empty(&uobj->list)) {
160 spin_lock_irqsave(&ufile->uobjects_lock, flags);
161 list_del_init(&uobj->list);
162 spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
163
164 /*
165 * Pairs with the get in rdma_alloc_commit_uobject(), could
166 * destroy uobj.
167 */
168 uverbs_uobject_put(uobj);
169 }
170
171 /*
172 * When aborting the stack kref remains owned by the core code, and is
173 * not transferred into the type. Pairs with the get in alloc_uobj
174 */
175 if (reason == RDMA_REMOVE_ABORT)
176 uverbs_uobject_put(uobj);
177
178 return 0;
179}
180
181/*
182 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
183 * sequence. It should only be used from command callbacks. On success the
184 * caller must pair this with uobj_put_destroy(). This
185 * version requires the caller to have already obtained an
186 * LOOKUP_DESTROY uobject kref.
187 */
188int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
189{
190 struct ib_uverbs_file *ufile = attrs->ufile;
191 int ret;
192
193 down_read(&ufile->hw_destroy_rwsem);
194
195 /*
196 * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
197 * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
198 * This is because any other concurrent thread can still see the object
199 * in the xarray due to RCU. Leaving it locked ensures nothing else will
200 * touch it.
201 */
202 ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
203 if (ret)
204 goto out_unlock;
205
206 ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
207 if (ret) {
208 atomic_set(&uobj->usecnt, 0);
209 goto out_unlock;
210 }
211
212out_unlock:
213 up_read(&ufile->hw_destroy_rwsem);
214 return ret;
215}
216
217/*
218 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
219 * with a NULL object pointer. The caller must pair this with
220 * uobj_put_destroy().
221 */
222struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
223 u32 id, struct uverbs_attr_bundle *attrs)
224{
225 struct ib_uobject *uobj;
226 int ret;
227
228 uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
229 UVERBS_LOOKUP_DESTROY, attrs);
230 if (IS_ERR(uobj))
231 return uobj;
232
233 ret = uobj_destroy(uobj, attrs);
234 if (ret) {
235 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
236 return ERR_PTR(ret);
237 }
238
239 return uobj;
240}
241
242/*
243 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
244 * (negative errno on failure). For use by callers that do not need the uobj.
245 */
246int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
247 struct uverbs_attr_bundle *attrs)
248{
249 struct ib_uobject *uobj;
250
251 uobj = __uobj_get_destroy(obj, id, attrs);
252 if (IS_ERR(uobj))
253 return PTR_ERR(uobj);
254 uobj_put_destroy(uobj);
255 return 0;
256}
257
258/* alloc_uobj must be undone by uverbs_destroy_uobject() */
259static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
260 const struct uverbs_api_object *obj)
261{
262 struct ib_uverbs_file *ufile = attrs->ufile;
263 struct ib_uobject *uobj;
264
265 if (!attrs->context) {
266 struct ib_ucontext *ucontext =
267 ib_uverbs_get_ucontext_file(ufile);
268
269 if (IS_ERR(ucontext))
270 return ERR_CAST(ucontext);
271 attrs->context = ucontext;
272 }
273
274 uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
275 if (!uobj)
276 return ERR_PTR(-ENOMEM);
277 /*
278 * user_handle should be filled by the handler,
279 * The object is added to the list in the commit stage.
280 */
281 uobj->ufile = ufile;
282 uobj->context = attrs->context;
283 INIT_LIST_HEAD(&uobj->list);
284 uobj->uapi_object = obj;
285 /*
286 * Allocated objects start out as write locked to deny any other
287 * syscalls from accessing them until they are committed. See
288 * rdma_alloc_commit_uobject
289 */
290 atomic_set(&uobj->usecnt, -1);
291 kref_init(&uobj->ref);
292
293 return uobj;
294}
295
296static int idr_add_uobj(struct ib_uobject *uobj)
297{
298 /*
299 * We start with allocating an idr pointing to NULL. This represents an
300 * object which isn't initialized yet. We'll replace it later on with
301 * the real object once we commit.
302 */
303 return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
304 GFP_KERNEL);
305}
306
307/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
308static struct ib_uobject *
309lookup_get_idr_uobject(const struct uverbs_api_object *obj,
310 struct ib_uverbs_file *ufile, s64 id,
311 enum rdma_lookup_mode mode)
312{
313 struct ib_uobject *uobj;
314
315 if (id < 0 || id > ULONG_MAX)
316 return ERR_PTR(-EINVAL);
317
318 rcu_read_lock();
319 /*
320 * The idr_find is guaranteed to return a pointer to something that
321 * isn't freed yet, or NULL, as the free after idr_remove goes through
322 * kfree_rcu(). However the object may still have been released and
323 * kfree() could be called at any time.
324 */
325 uobj = xa_load(&ufile->idr, id);
326 if (!uobj || !kref_get_unless_zero(&uobj->ref))
327 uobj = ERR_PTR(-ENOENT);
328 rcu_read_unlock();
329 return uobj;
330}
331
332static struct ib_uobject *
333lookup_get_fd_uobject(const struct uverbs_api_object *obj,
334 struct ib_uverbs_file *ufile, s64 id,
335 enum rdma_lookup_mode mode)
336{
337 const struct uverbs_obj_fd_type *fd_type;
338 struct file *f;
339 struct ib_uobject *uobject;
340 int fdno = id;
341
342 if (fdno != id)
343 return ERR_PTR(-EINVAL);
344
345 if (mode != UVERBS_LOOKUP_READ)
346 return ERR_PTR(-EOPNOTSUPP);
347
348 if (!obj->type_attrs)
349 return ERR_PTR(-EIO);
350 fd_type =
351 container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
352
353 f = fget(fdno);
354 if (!f)
355 return ERR_PTR(-EBADF);
356
357 uobject = f->private_data;
358 /*
359 * fget(id) ensures we are not currently running
360 * uverbs_uobject_fd_release(), and the caller is expected to ensure
361 * that release is never done while a call to lookup is possible.
362 */
363 if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
364 fput(f);
365 return ERR_PTR(-EBADF);
366 }
367
368 uverbs_uobject_get(uobject);
369 return uobject;
370}
371
372struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
373 struct ib_uverbs_file *ufile, s64 id,
374 enum rdma_lookup_mode mode,
375 struct uverbs_attr_bundle *attrs)
376{
377 struct ib_uobject *uobj;
378 int ret;
379
380 if (obj == ERR_PTR(-ENOMSG)) {
381 /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
382 uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
383 if (IS_ERR(uobj))
384 return uobj;
385 } else {
386 if (IS_ERR(obj))
387 return ERR_PTR(-EINVAL);
388
389 uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
390 if (IS_ERR(uobj))
391 return uobj;
392
393 if (uobj->uapi_object != obj) {
394 ret = -EINVAL;
395 goto free;
396 }
397 }
398
399 /*
400 * If we have been disassociated block every command except for
401 * DESTROY based commands.
402 */
403 if (mode != UVERBS_LOOKUP_DESTROY &&
404 !srcu_dereference(ufile->device->ib_dev,
405 &ufile->device->disassociate_srcu)) {
406 ret = -EIO;
407 goto free;
408 }
409
410 ret = uverbs_try_lock_object(uobj, mode);
411 if (ret)
412 goto free;
413 if (attrs)
414 attrs->context = uobj->context;
415
416 return uobj;
417free:
418 uobj->uapi_object->type_class->lookup_put(uobj, mode);
419 uverbs_uobject_put(uobj);
420 return ERR_PTR(ret);
421}
422
423static struct ib_uobject *
424alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
425 struct uverbs_attr_bundle *attrs)
426{
427 int ret;
428 struct ib_uobject *uobj;
429
430 uobj = alloc_uobj(attrs, obj);
431 if (IS_ERR(uobj))
432 return uobj;
433
434 ret = idr_add_uobj(uobj);
435 if (ret)
436 goto uobj_put;
437
438 ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
439 RDMACG_RESOURCE_HCA_OBJECT);
440 if (ret)
441 goto remove;
442
443 return uobj;
444
445remove:
446 xa_erase(&attrs->ufile->idr, uobj->id);
447uobj_put:
448 uverbs_uobject_put(uobj);
449 return ERR_PTR(ret);
450}
451
452static struct ib_uobject *
453alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
454 struct uverbs_attr_bundle *attrs)
455{
456 const struct uverbs_obj_fd_type *fd_type;
457 int new_fd;
458 struct ib_uobject *uobj, *ret;
459 struct file *filp;
460
461 uobj = alloc_uobj(attrs, obj);
462 if (IS_ERR(uobj))
463 return uobj;
464
465 fd_type =
466 container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
467 if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
468 fd_type->fops->release != &uverbs_async_event_release)) {
469 ret = ERR_PTR(-EINVAL);
470 goto err_fd;
471 }
472
473 new_fd = get_unused_fd_flags(O_CLOEXEC);
474 if (new_fd < 0) {
475 ret = ERR_PTR(new_fd);
476 goto err_fd;
477 }
478
479 /* Note that uverbs_uobject_fd_release() is called during abort */
480 filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
481 fd_type->flags);
482 if (IS_ERR(filp)) {
483 ret = ERR_CAST(filp);
484 goto err_getfile;
485 }
486 uobj->object = filp;
487
488 uobj->id = new_fd;
489 return uobj;
490
491err_getfile:
492 put_unused_fd(new_fd);
493err_fd:
494 uverbs_uobject_put(uobj);
495 return ret;
496}
497
498struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
499 struct uverbs_attr_bundle *attrs)
500{
501 struct ib_uverbs_file *ufile = attrs->ufile;
502 struct ib_uobject *ret;
503
504 if (IS_ERR(obj))
505 return ERR_PTR(-EINVAL);
506
507 /*
508 * The hw_destroy_rwsem is held across the entire object creation and
509 * released during rdma_alloc_commit_uobject or
510 * rdma_alloc_abort_uobject
511 */
512 if (!down_read_trylock(&ufile->hw_destroy_rwsem))
513 return ERR_PTR(-EIO);
514
515 ret = obj->type_class->alloc_begin(obj, attrs);
516 if (IS_ERR(ret)) {
517 up_read(&ufile->hw_destroy_rwsem);
518 return ret;
519 }
520 return ret;
521}
522
523static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
524{
525 ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
526 RDMACG_RESOURCE_HCA_OBJECT);
527
528 xa_erase(&uobj->ufile->idr, uobj->id);
529}
530
531static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
532 enum rdma_remove_reason why,
533 struct uverbs_attr_bundle *attrs)
534{
535 const struct uverbs_obj_idr_type *idr_type =
536 container_of(uobj->uapi_object->type_attrs,
537 struct uverbs_obj_idr_type, type);
538 int ret = idr_type->destroy_object(uobj, why, attrs);
539
540 if (ret)
541 return ret;
542
543 if (why == RDMA_REMOVE_ABORT)
544 return 0;
545
546 ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
547 RDMACG_RESOURCE_HCA_OBJECT);
548
549 return 0;
550}
551
552static void remove_handle_idr_uobject(struct ib_uobject *uobj)
553{
554 xa_erase(&uobj->ufile->idr, uobj->id);
555 /* Matches the kref in alloc_commit_idr_uobject */
556 uverbs_uobject_put(uobj);
557}
558
559static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
560{
561 struct file *filp = uobj->object;
562
563 fput(filp);
564 put_unused_fd(uobj->id);
565}
566
567static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
568 enum rdma_remove_reason why,
569 struct uverbs_attr_bundle *attrs)
570{
571 const struct uverbs_obj_fd_type *fd_type = container_of(
572 uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
573
574 fd_type->destroy_object(uobj, why);
575 return 0;
576}
577
578static void remove_handle_fd_uobject(struct ib_uobject *uobj)
579{
580}
581
582static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
583{
584 struct ib_uverbs_file *ufile = uobj->ufile;
585 void *old;
586
587 /*
588 * We already allocated this IDR with a NULL object, so
589 * this shouldn't fail.
590 *
591 * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
592 * It will be put by remove_commit_idr_uobject()
593 */
594 old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
595 WARN_ON(old != NULL);
596}
597
598static void swap_idr_uobjects(struct ib_uobject *obj_old,
599 struct ib_uobject *obj_new)
600{
601 struct ib_uverbs_file *ufile = obj_old->ufile;
602 void *old;
603
604 /*
605 * New must be an object that been allocated but not yet committed, this
606 * moves the pre-committed state to obj_old, new still must be comitted.
607 */
608 old = xa_cmpxchg(&ufile->idr, obj_old->id, obj_old, XA_ZERO_ENTRY,
609 GFP_KERNEL);
610 if (WARN_ON(old != obj_old))
611 return;
612
613 swap(obj_old->id, obj_new->id);
614
615 old = xa_cmpxchg(&ufile->idr, obj_old->id, NULL, obj_old, GFP_KERNEL);
616 WARN_ON(old != NULL);
617}
618
619static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
620{
621 int fd = uobj->id;
622 struct file *filp = uobj->object;
623
624 /* Matching put will be done in uverbs_uobject_fd_release() */
625 kref_get(&uobj->ufile->ref);
626
627 /* This shouldn't be used anymore. Use the file object instead */
628 uobj->id = 0;
629
630 /*
631 * NOTE: Once we install the file we loose ownership of our kref on
632 * uobj. It will be put by uverbs_uobject_fd_release()
633 */
634 filp->private_data = uobj;
635 fd_install(fd, filp);
636}
637
638/*
639 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
640 * caller can no longer assume uobj is valid. If this function fails it
641 * destroys the uboject, including the attached HW object.
642 */
643void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
644 struct uverbs_attr_bundle *attrs)
645{
646 struct ib_uverbs_file *ufile = attrs->ufile;
647
648 /* kref is held so long as the uobj is on the uobj list. */
649 uverbs_uobject_get(uobj);
650 spin_lock_irq(&ufile->uobjects_lock);
651 list_add(&uobj->list, &ufile->uobjects);
652 spin_unlock_irq(&ufile->uobjects_lock);
653
654 /* matches atomic_set(-1) in alloc_uobj */
655 atomic_set(&uobj->usecnt, 0);
656
657 /* alloc_commit consumes the uobj kref */
658 uobj->uapi_object->type_class->alloc_commit(uobj);
659
660 /* Matches the down_read in rdma_alloc_begin_uobject */
661 up_read(&ufile->hw_destroy_rwsem);
662}
663
664/*
665 * new_uobj will be assigned to the handle currently used by to_uobj, and
666 * to_uobj will be destroyed.
667 *
668 * Upon return the caller must do:
669 * rdma_alloc_commit_uobject(new_uobj)
670 * uobj_put_destroy(to_uobj)
671 *
672 * to_uobj must have a write get but the put mode switches to destroy once
673 * this is called.
674 */
675void rdma_assign_uobject(struct ib_uobject *to_uobj, struct ib_uobject *new_uobj,
676 struct uverbs_attr_bundle *attrs)
677{
678 assert_uverbs_usecnt(new_uobj, UVERBS_LOOKUP_WRITE);
679
680 if (WARN_ON(to_uobj->uapi_object != new_uobj->uapi_object ||
681 !to_uobj->uapi_object->type_class->swap_uobjects))
682 return;
683
684 to_uobj->uapi_object->type_class->swap_uobjects(to_uobj, new_uobj);
685
686 /*
687 * If this fails then the uobject is still completely valid (though with
688 * a new ID) and we leak it until context close.
689 */
690 uverbs_destroy_uobject(to_uobj, RDMA_REMOVE_DESTROY, attrs);
691}
692
693/*
694 * This consumes the kref for uobj. It is up to the caller to unwind the HW
695 * object and anything else connected to uobj before calling this.
696 */
697void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
698 struct uverbs_attr_bundle *attrs,
699 bool hw_obj_valid)
700{
701 struct ib_uverbs_file *ufile = uobj->ufile;
702 int ret;
703
704 if (hw_obj_valid) {
705 ret = uobj->uapi_object->type_class->destroy_hw(
706 uobj, RDMA_REMOVE_ABORT, attrs);
707 /*
708 * If the driver couldn't destroy the object then go ahead and
709 * commit it. Leaking objects that can't be destroyed is only
710 * done during FD close after the driver has a few more tries to
711 * destroy it.
712 */
713 if (WARN_ON(ret))
714 return rdma_alloc_commit_uobject(uobj, attrs);
715 }
716
717 uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
718
719 /* Matches the down_read in rdma_alloc_begin_uobject */
720 up_read(&ufile->hw_destroy_rwsem);
721}
722
723static void lookup_put_idr_uobject(struct ib_uobject *uobj,
724 enum rdma_lookup_mode mode)
725{
726}
727
728static void lookup_put_fd_uobject(struct ib_uobject *uobj,
729 enum rdma_lookup_mode mode)
730{
731 struct file *filp = uobj->object;
732
733 WARN_ON(mode != UVERBS_LOOKUP_READ);
734 /*
735 * This indirectly calls uverbs_uobject_fd_release() and free the
736 * object
737 */
738 fput(filp);
739}
740
741void rdma_lookup_put_uobject(struct ib_uobject *uobj,
742 enum rdma_lookup_mode mode)
743{
744 assert_uverbs_usecnt(uobj, mode);
745 /*
746 * In order to unlock an object, either decrease its usecnt for
747 * read access or zero it in case of exclusive access. See
748 * uverbs_try_lock_object for locking schema information.
749 */
750 switch (mode) {
751 case UVERBS_LOOKUP_READ:
752 atomic_dec(&uobj->usecnt);
753 break;
754 case UVERBS_LOOKUP_WRITE:
755 atomic_set(&uobj->usecnt, 0);
756 break;
757 case UVERBS_LOOKUP_DESTROY:
758 break;
759 }
760
761 uobj->uapi_object->type_class->lookup_put(uobj, mode);
762 /* Pairs with the kref obtained by type->lookup_get */
763 uverbs_uobject_put(uobj);
764}
765
766void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
767{
768 xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
769}
770
771void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
772{
773 struct ib_uobject *entry;
774 unsigned long id;
775
776 /*
777 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
778 * there are no HW objects left, however the xarray is still populated
779 * with anything that has not been cleaned up by userspace. Since the
780 * kref on ufile is 0, nothing is allowed to call lookup_get.
781 *
782 * This is an optimized equivalent to remove_handle_idr_uobject
783 */
784 xa_for_each(&ufile->idr, id, entry) {
785 WARN_ON(entry->object);
786 uverbs_uobject_put(entry);
787 }
788
789 xa_destroy(&ufile->idr);
790}
791
792const struct uverbs_obj_type_class uverbs_idr_class = {
793 .alloc_begin = alloc_begin_idr_uobject,
794 .lookup_get = lookup_get_idr_uobject,
795 .alloc_commit = alloc_commit_idr_uobject,
796 .alloc_abort = alloc_abort_idr_uobject,
797 .lookup_put = lookup_put_idr_uobject,
798 .destroy_hw = destroy_hw_idr_uobject,
799 .remove_handle = remove_handle_idr_uobject,
800 .swap_uobjects = swap_idr_uobjects,
801};
802EXPORT_SYMBOL(uverbs_idr_class);
803
804/*
805 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
806 * file_operations release method.
807 */
808int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
809{
810 struct ib_uverbs_file *ufile;
811 struct ib_uobject *uobj;
812
813 /*
814 * This can only happen if the fput came from alloc_abort_fd_uobject()
815 */
816 if (!filp->private_data)
817 return 0;
818 uobj = filp->private_data;
819 ufile = uobj->ufile;
820
821 if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
822 struct uverbs_attr_bundle attrs = {
823 .context = uobj->context,
824 .ufile = ufile,
825 };
826
827 /*
828 * lookup_get_fd_uobject holds the kref on the struct file any
829 * time a FD uobj is locked, which prevents this release
830 * method from being invoked. Meaning we can always get the
831 * write lock here, or we have a kernel bug.
832 */
833 WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
834 uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
835 up_read(&ufile->hw_destroy_rwsem);
836 }
837
838 /* Matches the get in alloc_commit_fd_uobject() */
839 kref_put(&ufile->ref, ib_uverbs_release_file);
840
841 /* Pairs with filp->private_data in alloc_begin_fd_uobject */
842 uverbs_uobject_put(uobj);
843 return 0;
844}
845EXPORT_SYMBOL(uverbs_uobject_fd_release);
846
847/*
848 * Drop the ucontext off the ufile and completely disconnect it from the
849 * ib_device
850 */
851static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
852 enum rdma_remove_reason reason)
853{
854 struct ib_ucontext *ucontext = ufile->ucontext;
855 struct ib_device *ib_dev = ucontext->device;
856
857 /*
858 * If we are closing the FD then the user mmap VMAs must have
859 * already been destroyed as they hold on to the filep, otherwise
860 * they need to be zap'd.
861 */
862 if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
863 uverbs_user_mmap_disassociate(ufile);
864 if (ib_dev->ops.disassociate_ucontext)
865 ib_dev->ops.disassociate_ucontext(ucontext);
866 }
867
868 ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
869 RDMACG_RESOURCE_HCA_HANDLE);
870
871 rdma_restrack_del(&ucontext->res);
872
873 ib_dev->ops.dealloc_ucontext(ucontext);
874 WARN_ON(!xa_empty(&ucontext->mmap_xa));
875 kfree(ucontext);
876
877 ufile->ucontext = NULL;
878}
879
880static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
881 enum rdma_remove_reason reason)
882{
883 struct ib_uobject *obj, *next_obj;
884 int ret = -EINVAL;
885 struct uverbs_attr_bundle attrs = { .ufile = ufile };
886
887 /*
888 * This shouldn't run while executing other commands on this
889 * context. Thus, the only thing we should take care of is
890 * releasing a FD while traversing this list. The FD could be
891 * closed and released from the _release fop of this FD.
892 * In order to mitigate this, we add a lock.
893 * We take and release the lock per traversal in order to let
894 * other threads (which might still use the FDs) chance to run.
895 */
896 list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
897 attrs.context = obj->context;
898 /*
899 * if we hit this WARN_ON, that means we are
900 * racing with a lookup_get.
901 */
902 WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
903 if (reason == RDMA_REMOVE_DRIVER_FAILURE)
904 obj->object = NULL;
905 if (!uverbs_destroy_uobject(obj, reason, &attrs))
906 ret = 0;
907 else
908 atomic_set(&obj->usecnt, 0);
909 }
910
911 if (reason == RDMA_REMOVE_DRIVER_FAILURE) {
912 WARN_ON(!list_empty(&ufile->uobjects));
913 return 0;
914 }
915 return ret;
916}
917
918/*
919 * Destroy the ucontext and every uobject associated with it.
920 *
921 * This is internally locked and can be called in parallel from multiple
922 * contexts.
923 */
924void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
925 enum rdma_remove_reason reason)
926{
927 down_write(&ufile->hw_destroy_rwsem);
928
929 /*
930 * If a ucontext was never created then we can't have any uobjects to
931 * cleanup, nothing to do.
932 */
933 if (!ufile->ucontext)
934 goto done;
935
936 while (!list_empty(&ufile->uobjects) &&
937 !__uverbs_cleanup_ufile(ufile, reason)) {
938 }
939
940 if (WARN_ON(!list_empty(&ufile->uobjects)))
941 __uverbs_cleanup_ufile(ufile, RDMA_REMOVE_DRIVER_FAILURE);
942 ufile_destroy_ucontext(ufile, reason);
943
944done:
945 up_write(&ufile->hw_destroy_rwsem);
946}
947
948const struct uverbs_obj_type_class uverbs_fd_class = {
949 .alloc_begin = alloc_begin_fd_uobject,
950 .lookup_get = lookup_get_fd_uobject,
951 .alloc_commit = alloc_commit_fd_uobject,
952 .alloc_abort = alloc_abort_fd_uobject,
953 .lookup_put = lookup_put_fd_uobject,
954 .destroy_hw = destroy_hw_fd_uobject,
955 .remove_handle = remove_handle_fd_uobject,
956};
957EXPORT_SYMBOL(uverbs_fd_class);
958
959struct ib_uobject *
960uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
961 s64 id, struct uverbs_attr_bundle *attrs)
962{
963 const struct uverbs_api_object *obj =
964 uapi_get_object(attrs->ufile->device->uapi, object_id);
965
966 switch (access) {
967 case UVERBS_ACCESS_READ:
968 return rdma_lookup_get_uobject(obj, attrs->ufile, id,
969 UVERBS_LOOKUP_READ, attrs);
970 case UVERBS_ACCESS_DESTROY:
971 /* Actual destruction is done inside uverbs_handle_method */
972 return rdma_lookup_get_uobject(obj, attrs->ufile, id,
973 UVERBS_LOOKUP_DESTROY, attrs);
974 case UVERBS_ACCESS_WRITE:
975 return rdma_lookup_get_uobject(obj, attrs->ufile, id,
976 UVERBS_LOOKUP_WRITE, attrs);
977 case UVERBS_ACCESS_NEW:
978 return rdma_alloc_begin_uobject(obj, attrs);
979 default:
980 WARN_ON(true);
981 return ERR_PTR(-EOPNOTSUPP);
982 }
983}
984
985void uverbs_finalize_object(struct ib_uobject *uobj,
986 enum uverbs_obj_access access, bool hw_obj_valid,
987 bool commit, struct uverbs_attr_bundle *attrs)
988{
989 /*
990 * refcounts should be handled at the object level and not at the
991 * uobject level. Refcounts of the objects themselves are done in
992 * handlers.
993 */
994
995 switch (access) {
996 case UVERBS_ACCESS_READ:
997 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
998 break;
999 case UVERBS_ACCESS_WRITE:
1000 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
1001 break;
1002 case UVERBS_ACCESS_DESTROY:
1003 if (uobj)
1004 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
1005 break;
1006 case UVERBS_ACCESS_NEW:
1007 if (commit)
1008 rdma_alloc_commit_uobject(uobj, attrs);
1009 else
1010 rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
1011 break;
1012 default:
1013 WARN_ON(true);
1014 }
1015}
1/*
2 * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/file.h>
34#include <linux/anon_inodes.h>
35#include <rdma/ib_verbs.h>
36#include <rdma/uverbs_types.h>
37#include <linux/rcupdate.h>
38#include <rdma/uverbs_ioctl.h>
39#include <rdma/rdma_user_ioctl.h>
40#include "uverbs.h"
41#include "core_priv.h"
42#include "rdma_core.h"
43
44int uverbs_ns_idx(u16 *id, unsigned int ns_count)
45{
46 int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;
47
48 if (ret >= ns_count)
49 return -EINVAL;
50
51 *id &= ~UVERBS_ID_NS_MASK;
52 return ret;
53}
54
55const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
56 uint16_t object)
57{
58 const struct uverbs_root_spec *object_hash = ibdev->specs_root;
59 const struct uverbs_object_spec_hash *objects;
60 int ret = uverbs_ns_idx(&object, object_hash->num_buckets);
61
62 if (ret < 0)
63 return NULL;
64
65 objects = object_hash->object_buckets[ret];
66
67 if (object >= objects->num_objects)
68 return NULL;
69
70 return objects->objects[object];
71}
72
73const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
74 uint16_t method)
75{
76 const struct uverbs_method_spec_hash *methods;
77 int ret = uverbs_ns_idx(&method, object->num_buckets);
78
79 if (ret < 0)
80 return NULL;
81
82 methods = object->method_buckets[ret];
83 if (method >= methods->num_methods)
84 return NULL;
85
86 return methods->methods[method];
87}
88
89void uverbs_uobject_get(struct ib_uobject *uobject)
90{
91 kref_get(&uobject->ref);
92}
93
94static void uverbs_uobject_free(struct kref *ref)
95{
96 struct ib_uobject *uobj =
97 container_of(ref, struct ib_uobject, ref);
98
99 if (uobj->type->type_class->needs_kfree_rcu)
100 kfree_rcu(uobj, rcu);
101 else
102 kfree(uobj);
103}
104
105void uverbs_uobject_put(struct ib_uobject *uobject)
106{
107 kref_put(&uobject->ref, uverbs_uobject_free);
108}
109
110static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
111{
112 /*
113 * When a shared access is required, we use a positive counter. Each
114 * shared access request checks that the value != -1 and increment it.
115 * Exclusive access is required for operations like write or destroy.
116 * In exclusive access mode, we check that the counter is zero (nobody
117 * claimed this object) and we set it to -1. Releasing a shared access
118 * lock is done simply by decreasing the counter. As for exclusive
119 * access locks, since only a single one of them is is allowed
120 * concurrently, setting the counter to zero is enough for releasing
121 * this lock.
122 */
123 if (!exclusive)
124 return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
125 -EBUSY : 0;
126
127 /* lock is either WRITE or DESTROY - should be exclusive */
128 return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
129}
130
131static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
132 const struct uverbs_obj_type *type)
133{
134 struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
135
136 if (!uobj)
137 return ERR_PTR(-ENOMEM);
138 /*
139 * user_handle should be filled by the handler,
140 * The object is added to the list in the commit stage.
141 */
142 uobj->context = context;
143 uobj->type = type;
144 /*
145 * Allocated objects start out as write locked to deny any other
146 * syscalls from accessing them until they are committed. See
147 * rdma_alloc_commit_uobject
148 */
149 atomic_set(&uobj->usecnt, -1);
150 kref_init(&uobj->ref);
151
152 return uobj;
153}
154
155static int idr_add_uobj(struct ib_uobject *uobj)
156{
157 int ret;
158
159 idr_preload(GFP_KERNEL);
160 spin_lock(&uobj->context->ufile->idr_lock);
161
162 /*
163 * We start with allocating an idr pointing to NULL. This represents an
164 * object which isn't initialized yet. We'll replace it later on with
165 * the real object once we commit.
166 */
167 ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
168 min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
169 if (ret >= 0)
170 uobj->id = ret;
171
172 spin_unlock(&uobj->context->ufile->idr_lock);
173 idr_preload_end();
174
175 return ret < 0 ? ret : 0;
176}
177
178/*
179 * It only removes it from the uobjects list, uverbs_uobject_put() is still
180 * required.
181 */
182static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
183{
184 spin_lock(&uobj->context->ufile->idr_lock);
185 idr_remove(&uobj->context->ufile->idr, uobj->id);
186 spin_unlock(&uobj->context->ufile->idr_lock);
187}
188
189/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
190static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
191 struct ib_ucontext *ucontext,
192 int id, bool exclusive)
193{
194 struct ib_uobject *uobj;
195
196 rcu_read_lock();
197 /* object won't be released as we're protected in rcu */
198 uobj = idr_find(&ucontext->ufile->idr, id);
199 if (!uobj) {
200 uobj = ERR_PTR(-ENOENT);
201 goto free;
202 }
203
204 /*
205 * The idr_find is guaranteed to return a pointer to something that
206 * isn't freed yet, or NULL, as the free after idr_remove goes through
207 * kfree_rcu(). However the object may still have been released and
208 * kfree() could be called at any time.
209 */
210 if (!kref_get_unless_zero(&uobj->ref))
211 uobj = ERR_PTR(-ENOENT);
212
213free:
214 rcu_read_unlock();
215 return uobj;
216}
217
218static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
219 struct ib_ucontext *ucontext,
220 int id, bool exclusive)
221{
222 struct file *f;
223 struct ib_uobject *uobject;
224 const struct uverbs_obj_fd_type *fd_type =
225 container_of(type, struct uverbs_obj_fd_type, type);
226
227 if (exclusive)
228 return ERR_PTR(-EOPNOTSUPP);
229
230 f = fget(id);
231 if (!f)
232 return ERR_PTR(-EBADF);
233
234 uobject = f->private_data;
235 /*
236 * fget(id) ensures we are not currently running uverbs_close_fd,
237 * and the caller is expected to ensure that uverbs_close_fd is never
238 * done while a call top lookup is possible.
239 */
240 if (f->f_op != fd_type->fops) {
241 fput(f);
242 return ERR_PTR(-EBADF);
243 }
244
245 uverbs_uobject_get(uobject);
246 return uobject;
247}
248
249struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
250 struct ib_ucontext *ucontext,
251 int id, bool exclusive)
252{
253 struct ib_uobject *uobj;
254 int ret;
255
256 uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
257 if (IS_ERR(uobj))
258 return uobj;
259
260 if (uobj->type != type) {
261 ret = -EINVAL;
262 goto free;
263 }
264
265 ret = uverbs_try_lock_object(uobj, exclusive);
266 if (ret) {
267 WARN(ucontext->cleanup_reason,
268 "ib_uverbs: Trying to lookup_get while cleanup context\n");
269 goto free;
270 }
271
272 return uobj;
273free:
274 uobj->type->type_class->lookup_put(uobj, exclusive);
275 uverbs_uobject_put(uobj);
276 return ERR_PTR(ret);
277}
278
279static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
280 struct ib_ucontext *ucontext)
281{
282 int ret;
283 struct ib_uobject *uobj;
284
285 uobj = alloc_uobj(ucontext, type);
286 if (IS_ERR(uobj))
287 return uobj;
288
289 ret = idr_add_uobj(uobj);
290 if (ret)
291 goto uobj_put;
292
293 ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
294 RDMACG_RESOURCE_HCA_OBJECT);
295 if (ret)
296 goto idr_remove;
297
298 return uobj;
299
300idr_remove:
301 uverbs_idr_remove_uobj(uobj);
302uobj_put:
303 uverbs_uobject_put(uobj);
304 return ERR_PTR(ret);
305}
306
307static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
308 struct ib_ucontext *ucontext)
309{
310 const struct uverbs_obj_fd_type *fd_type =
311 container_of(type, struct uverbs_obj_fd_type, type);
312 int new_fd;
313 struct ib_uobject *uobj;
314 struct ib_uobject_file *uobj_file;
315 struct file *filp;
316
317 new_fd = get_unused_fd_flags(O_CLOEXEC);
318 if (new_fd < 0)
319 return ERR_PTR(new_fd);
320
321 uobj = alloc_uobj(ucontext, type);
322 if (IS_ERR(uobj)) {
323 put_unused_fd(new_fd);
324 return uobj;
325 }
326
327 uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
328 filp = anon_inode_getfile(fd_type->name,
329 fd_type->fops,
330 uobj_file,
331 fd_type->flags);
332 if (IS_ERR(filp)) {
333 put_unused_fd(new_fd);
334 uverbs_uobject_put(uobj);
335 return (void *)filp;
336 }
337
338 uobj_file->uobj.id = new_fd;
339 uobj_file->uobj.object = filp;
340 uobj_file->ufile = ucontext->ufile;
341 INIT_LIST_HEAD(&uobj->list);
342 kref_get(&uobj_file->ufile->ref);
343
344 return uobj;
345}
346
347struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
348 struct ib_ucontext *ucontext)
349{
350 return type->type_class->alloc_begin(type, ucontext);
351}
352
353static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
354 enum rdma_remove_reason why)
355{
356 const struct uverbs_obj_idr_type *idr_type =
357 container_of(uobj->type, struct uverbs_obj_idr_type,
358 type);
359 int ret = idr_type->destroy_object(uobj, why);
360
361 /*
362 * We can only fail gracefully if the user requested to destroy the
363 * object. In the rest of the cases, just remove whatever you can.
364 */
365 if (why == RDMA_REMOVE_DESTROY && ret)
366 return ret;
367
368 ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
369 RDMACG_RESOURCE_HCA_OBJECT);
370 uverbs_idr_remove_uobj(uobj);
371
372 return ret;
373}
374
375static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
376{
377 struct ib_uobject_file *uobj_file =
378 container_of(uobj, struct ib_uobject_file, uobj);
379 struct file *filp = uobj->object;
380 int id = uobj_file->uobj.id;
381
382 /* Unsuccessful NEW */
383 fput(filp);
384 put_unused_fd(id);
385}
386
387static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
388 enum rdma_remove_reason why)
389{
390 const struct uverbs_obj_fd_type *fd_type =
391 container_of(uobj->type, struct uverbs_obj_fd_type, type);
392 struct ib_uobject_file *uobj_file =
393 container_of(uobj, struct ib_uobject_file, uobj);
394 int ret = fd_type->context_closed(uobj_file, why);
395
396 if (why == RDMA_REMOVE_DESTROY && ret)
397 return ret;
398
399 if (why == RDMA_REMOVE_DURING_CLEANUP) {
400 alloc_abort_fd_uobject(uobj);
401 return ret;
402 }
403
404 uobj_file->uobj.context = NULL;
405 return ret;
406}
407
408static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
409{
410#ifdef CONFIG_LOCKDEP
411 if (exclusive)
412 WARN_ON(atomic_read(&uobj->usecnt) != -1);
413 else
414 WARN_ON(atomic_read(&uobj->usecnt) <= 0);
415#endif
416}
417
418static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
419 enum rdma_remove_reason why)
420{
421 int ret;
422 struct ib_ucontext *ucontext = uobj->context;
423
424 ret = uobj->type->type_class->remove_commit(uobj, why);
425 if (ret && why == RDMA_REMOVE_DESTROY) {
426 /* We couldn't remove the object, so just unlock the uobject */
427 atomic_set(&uobj->usecnt, 0);
428 uobj->type->type_class->lookup_put(uobj, true);
429 } else {
430 mutex_lock(&ucontext->uobjects_lock);
431 list_del(&uobj->list);
432 mutex_unlock(&ucontext->uobjects_lock);
433 /* put the ref we took when we created the object */
434 uverbs_uobject_put(uobj);
435 }
436
437 return ret;
438}
439
440/* This is called only for user requested DESTROY reasons */
441int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
442{
443 int ret;
444 struct ib_ucontext *ucontext = uobj->context;
445
446 /* put the ref count we took at lookup_get */
447 uverbs_uobject_put(uobj);
448 /* Cleanup is running. Calling this should have been impossible */
449 if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
450 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
451 return 0;
452 }
453 assert_uverbs_usecnt(uobj, true);
454 ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
455
456 up_read(&ucontext->cleanup_rwsem);
457 return ret;
458}
459
460static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
461 enum rdma_remove_reason why)
462{
463 return 0;
464}
465
466static const struct uverbs_obj_type null_obj_type = {
467 .type_class = &((const struct uverbs_obj_type_class){
468 .remove_commit = null_obj_type_class_remove_commit,
469 /* be cautious */
470 .needs_kfree_rcu = true}),
471};
472
473int rdma_explicit_destroy(struct ib_uobject *uobject)
474{
475 int ret;
476 struct ib_ucontext *ucontext = uobject->context;
477
478 /* Cleanup is running. Calling this should have been impossible */
479 if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
480 WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
481 return 0;
482 }
483 assert_uverbs_usecnt(uobject, true);
484 ret = uobject->type->type_class->remove_commit(uobject,
485 RDMA_REMOVE_DESTROY);
486 if (ret)
487 goto out;
488
489 uobject->type = &null_obj_type;
490
491out:
492 up_read(&ucontext->cleanup_rwsem);
493 return ret;
494}
495
496static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
497{
498 spin_lock(&uobj->context->ufile->idr_lock);
499 /*
500 * We already allocated this IDR with a NULL object, so
501 * this shouldn't fail.
502 */
503 WARN_ON(idr_replace(&uobj->context->ufile->idr,
504 uobj, uobj->id));
505 spin_unlock(&uobj->context->ufile->idr_lock);
506}
507
508static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
509{
510 struct ib_uobject_file *uobj_file =
511 container_of(uobj, struct ib_uobject_file, uobj);
512
513 fd_install(uobj_file->uobj.id, uobj->object);
514 /* This shouldn't be used anymore. Use the file object instead */
515 uobj_file->uobj.id = 0;
516 /* Get another reference as we export this to the fops */
517 uverbs_uobject_get(&uobj_file->uobj);
518}
519
520int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
521{
522 /* Cleanup is running. Calling this should have been impossible */
523 if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
524 int ret;
525
526 WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
527 ret = uobj->type->type_class->remove_commit(uobj,
528 RDMA_REMOVE_DURING_CLEANUP);
529 if (ret)
530 pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
531 uobj->id);
532 return ret;
533 }
534
535 /* matches atomic_set(-1) in alloc_uobj */
536 assert_uverbs_usecnt(uobj, true);
537 atomic_set(&uobj->usecnt, 0);
538
539 mutex_lock(&uobj->context->uobjects_lock);
540 list_add(&uobj->list, &uobj->context->uobjects);
541 mutex_unlock(&uobj->context->uobjects_lock);
542
543 uobj->type->type_class->alloc_commit(uobj);
544 up_read(&uobj->context->cleanup_rwsem);
545
546 return 0;
547}
548
549static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
550{
551 uverbs_idr_remove_uobj(uobj);
552 ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
553 RDMACG_RESOURCE_HCA_OBJECT);
554 uverbs_uobject_put(uobj);
555}
556
557void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
558{
559 uobj->type->type_class->alloc_abort(uobj);
560}
561
562static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
563{
564}
565
566static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
567{
568 struct file *filp = uobj->object;
569
570 WARN_ON(exclusive);
571 /* This indirectly calls uverbs_close_fd and free the object */
572 fput(filp);
573}
574
575void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
576{
577 assert_uverbs_usecnt(uobj, exclusive);
578 uobj->type->type_class->lookup_put(uobj, exclusive);
579 /*
580 * In order to unlock an object, either decrease its usecnt for
581 * read access or zero it in case of exclusive access. See
582 * uverbs_try_lock_object for locking schema information.
583 */
584 if (!exclusive)
585 atomic_dec(&uobj->usecnt);
586 else
587 atomic_set(&uobj->usecnt, 0);
588
589 uverbs_uobject_put(uobj);
590}
591
592const struct uverbs_obj_type_class uverbs_idr_class = {
593 .alloc_begin = alloc_begin_idr_uobject,
594 .lookup_get = lookup_get_idr_uobject,
595 .alloc_commit = alloc_commit_idr_uobject,
596 .alloc_abort = alloc_abort_idr_uobject,
597 .lookup_put = lookup_put_idr_uobject,
598 .remove_commit = remove_commit_idr_uobject,
599 /*
600 * When we destroy an object, we first just lock it for WRITE and
601 * actually DESTROY it in the finalize stage. So, the problematic
602 * scenario is when we just started the finalize stage of the
603 * destruction (nothing was executed yet). Now, the other thread
604 * fetched the object for READ access, but it didn't lock it yet.
605 * The DESTROY thread continues and starts destroying the object.
606 * When the other thread continue - without the RCU, it would
607 * access freed memory. However, the rcu_read_lock delays the free
608 * until the rcu_read_lock of the READ operation quits. Since the
609 * exclusive lock of the object is still taken by the DESTROY flow, the
610 * READ operation will get -EBUSY and it'll just bail out.
611 */
612 .needs_kfree_rcu = true,
613};
614
615static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
616{
617 struct ib_ucontext *ucontext;
618 struct ib_uverbs_file *ufile = uobj_file->ufile;
619 int ret;
620
621 mutex_lock(&uobj_file->ufile->cleanup_mutex);
622
623 /* uobject was either already cleaned up or is cleaned up right now anyway */
624 if (!uobj_file->uobj.context ||
625 !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
626 goto unlock;
627
628 ucontext = uobj_file->uobj.context;
629 ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
630 up_read(&ucontext->cleanup_rwsem);
631 if (ret)
632 pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
633unlock:
634 mutex_unlock(&ufile->cleanup_mutex);
635}
636
637void uverbs_close_fd(struct file *f)
638{
639 struct ib_uobject_file *uobj_file = f->private_data;
640 struct kref *uverbs_file_ref = &uobj_file->ufile->ref;
641
642 _uverbs_close_fd(uobj_file);
643 uverbs_uobject_put(&uobj_file->uobj);
644 kref_put(uverbs_file_ref, ib_uverbs_release_file);
645}
646
647void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
648{
649 enum rdma_remove_reason reason = device_removed ?
650 RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
651 unsigned int cur_order = 0;
652
653 ucontext->cleanup_reason = reason;
654 /*
655 * Waits for all remove_commit and alloc_commit to finish. Logically, We
656 * want to hold this forever as the context is going to be destroyed,
657 * but we'll release it since it causes a "held lock freed" BUG message.
658 */
659 down_write(&ucontext->cleanup_rwsem);
660
661 while (!list_empty(&ucontext->uobjects)) {
662 struct ib_uobject *obj, *next_obj;
663 unsigned int next_order = UINT_MAX;
664
665 /*
666 * This shouldn't run while executing other commands on this
667 * context. Thus, the only thing we should take care of is
668 * releasing a FD while traversing this list. The FD could be
669 * closed and released from the _release fop of this FD.
670 * In order to mitigate this, we add a lock.
671 * We take and release the lock per order traversal in order
672 * to let other threads (which might still use the FDs) chance
673 * to run.
674 */
675 mutex_lock(&ucontext->uobjects_lock);
676 list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
677 list) {
678 if (obj->type->destroy_order == cur_order) {
679 int ret;
680
681 /*
682 * if we hit this WARN_ON, that means we are
683 * racing with a lookup_get.
684 */
685 WARN_ON(uverbs_try_lock_object(obj, true));
686 ret = obj->type->type_class->remove_commit(obj,
687 reason);
688 list_del(&obj->list);
689 if (ret)
690 pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
691 obj->id, cur_order);
692 /* put the ref we took when we created the object */
693 uverbs_uobject_put(obj);
694 } else {
695 next_order = min(next_order,
696 obj->type->destroy_order);
697 }
698 }
699 mutex_unlock(&ucontext->uobjects_lock);
700 cur_order = next_order;
701 }
702 up_write(&ucontext->cleanup_rwsem);
703}
704
705void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
706{
707 ucontext->cleanup_reason = 0;
708 mutex_init(&ucontext->uobjects_lock);
709 INIT_LIST_HEAD(&ucontext->uobjects);
710 init_rwsem(&ucontext->cleanup_rwsem);
711}
712
713const struct uverbs_obj_type_class uverbs_fd_class = {
714 .alloc_begin = alloc_begin_fd_uobject,
715 .lookup_get = lookup_get_fd_uobject,
716 .alloc_commit = alloc_commit_fd_uobject,
717 .alloc_abort = alloc_abort_fd_uobject,
718 .lookup_put = lookup_put_fd_uobject,
719 .remove_commit = remove_commit_fd_uobject,
720 .needs_kfree_rcu = false,
721};
722
723struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
724 struct ib_ucontext *ucontext,
725 enum uverbs_obj_access access,
726 int id)
727{
728 switch (access) {
729 case UVERBS_ACCESS_READ:
730 return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
731 case UVERBS_ACCESS_DESTROY:
732 case UVERBS_ACCESS_WRITE:
733 return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
734 case UVERBS_ACCESS_NEW:
735 return rdma_alloc_begin_uobject(type_attrs, ucontext);
736 default:
737 WARN_ON(true);
738 return ERR_PTR(-EOPNOTSUPP);
739 }
740}
741
742int uverbs_finalize_object(struct ib_uobject *uobj,
743 enum uverbs_obj_access access,
744 bool commit)
745{
746 int ret = 0;
747
748 /*
749 * refcounts should be handled at the object level and not at the
750 * uobject level. Refcounts of the objects themselves are done in
751 * handlers.
752 */
753
754 switch (access) {
755 case UVERBS_ACCESS_READ:
756 rdma_lookup_put_uobject(uobj, false);
757 break;
758 case UVERBS_ACCESS_WRITE:
759 rdma_lookup_put_uobject(uobj, true);
760 break;
761 case UVERBS_ACCESS_DESTROY:
762 if (commit)
763 ret = rdma_remove_commit_uobject(uobj);
764 else
765 rdma_lookup_put_uobject(uobj, true);
766 break;
767 case UVERBS_ACCESS_NEW:
768 if (commit)
769 ret = rdma_alloc_commit_uobject(uobj);
770 else
771 rdma_alloc_abort_uobject(uobj);
772 break;
773 default:
774 WARN_ON(true);
775 ret = -EOPNOTSUPP;
776 }
777
778 return ret;
779}
780
781int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
782 struct uverbs_attr_spec_hash * const *spec_hash,
783 size_t num,
784 bool commit)
785{
786 unsigned int i;
787 int ret = 0;
788
789 for (i = 0; i < num; i++) {
790 struct uverbs_attr_bundle_hash *curr_bundle =
791 &attrs_bundle->hash[i];
792 const struct uverbs_attr_spec_hash *curr_spec_bucket =
793 spec_hash[i];
794 unsigned int j;
795
796 for (j = 0; j < curr_bundle->num_attrs; j++) {
797 struct uverbs_attr *attr;
798 const struct uverbs_attr_spec *spec;
799
800 if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
801 continue;
802
803 attr = &curr_bundle->attrs[j];
804 spec = &curr_spec_bucket->attrs[j];
805
806 if (spec->type == UVERBS_ATTR_TYPE_IDR ||
807 spec->type == UVERBS_ATTR_TYPE_FD) {
808 int current_ret;
809
810 current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
811 spec->obj.access,
812 commit);
813 if (!ret)
814 ret = current_ret;
815 }
816 }
817 }
818 return ret;
819}