1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/device.h>
40#include <linux/err.h>
41#include <linux/fs.h>
42#include <linux/poll.h>
43#include <linux/sched.h>
44#include <linux/file.h>
45#include <linux/cdev.h>
46#include <linux/anon_inodes.h>
47#include <linux/slab.h>
48#include <linux/sched/mm.h>
49
50#include <linux/uaccess.h>
51
52#include <rdma/ib.h>
53#include <rdma/uverbs_std_types.h>
54#include <rdma/rdma_netlink.h>
55
56#include "uverbs.h"
57#include "core_priv.h"
58#include "rdma_core.h"
59
60MODULE_AUTHOR("Roland Dreier");
61MODULE_DESCRIPTION("InfiniBand userspace verbs access");
62MODULE_LICENSE("Dual BSD/GPL");
63
64enum {
65 IB_UVERBS_MAJOR = 231,
66 IB_UVERBS_BASE_MINOR = 192,
67 IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
68 IB_UVERBS_NUM_FIXED_MINOR = 32,
69 IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
70};
71
72#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
73
74static dev_t dynamic_uverbs_dev;
75
76static DEFINE_IDA(uverbs_ida);
77static int ib_uverbs_add_one(struct ib_device *device);
78static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
79
80static char *uverbs_devnode(const struct device *dev, umode_t *mode)
81{
82 if (mode)
83 *mode = 0666;
84 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
85}
86
87static const struct class uverbs_class = {
88 .name = "infiniband_verbs",
89 .devnode = uverbs_devnode,
90};
91
92/*
93 * Must be called with the ufile->device->disassociate_srcu held, and the lock
94 * must be held until use of the ucontext is finished.
95 */
96struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
97{
98 /*
99	 * We do not hold the hw_destroy_rwsem lock for this flow; SRCU is used
100	 * instead. It does not matter if someone races this with get_context:
101	 * we get either NULL or a valid ucontext.
102 */
103 struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);
104
105 if (!srcu_dereference(ufile->device->ib_dev,
106 &ufile->device->disassociate_srcu))
107 return ERR_PTR(-EIO);
108
109 if (!ucontext)
110 return ERR_PTR(-EINVAL);
111
112 return ucontext;
113}
114EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
115
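/*
 * Tear down a user memory window: the PD usecnt taken at allocation time is
 * only dropped (and the MW freed) once the driver's dealloc_mw() succeeds.
 */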
116int uverbs_dealloc_mw(struct ib_mw *mw)
117{
118 struct ib_pd *pd = mw->pd;
119 int ret;
120
121 ret = mw->device->ops.dealloc_mw(mw);
122 if (ret)
123 return ret;
124
125 atomic_dec(&pd->usecnt);
126 kfree(mw);
127 return ret;
128}
129
130static void ib_uverbs_release_dev(struct device *device)
131{
132 struct ib_uverbs_device *dev =
133 container_of(device, struct ib_uverbs_device, dev);
134
135 uverbs_destroy_api(dev->uapi);
136 cleanup_srcu_struct(&dev->disassociate_srcu);
137 mutex_destroy(&dev->lists_mutex);
138 mutex_destroy(&dev->xrcd_tree_mutex);
139 kfree(dev);
140}
141
142void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
143 struct ib_ucq_object *uobj)
144{
145 struct ib_uverbs_event *evt, *tmp;
146
147 if (ev_file) {
148 spin_lock_irq(&ev_file->ev_queue.lock);
149 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
150 list_del(&evt->list);
151 kfree(evt);
152 }
153 spin_unlock_irq(&ev_file->ev_queue.lock);
154
155 uverbs_uobject_put(&ev_file->uobj);
156 }
157
158 ib_uverbs_release_uevent(&uobj->uevent);
159}
160
161void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
162{
163 struct ib_uverbs_async_event_file *async_file = uobj->event_file;
164 struct ib_uverbs_event *evt, *tmp;
165
166 if (!async_file)
167 return;
168
169 spin_lock_irq(&async_file->ev_queue.lock);
170 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
171 list_del(&evt->list);
172 kfree(evt);
173 }
174 spin_unlock_irq(&async_file->ev_queue.lock);
175 uverbs_uobject_put(&async_file->uobj);
176}
177
178void ib_uverbs_detach_umcast(struct ib_qp *qp,
179 struct ib_uqp_object *uobj)
180{
181 struct ib_uverbs_mcast_entry *mcast, *tmp;
182
183 list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
184 ib_detach_mcast(qp, &mcast->gid, mcast->lid);
185 list_del(&mcast->list);
186 kfree(mcast);
187 }
188}
189
190static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
191{
192 complete(&dev->comp);
193}
194
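/*
 * kref release for an ib_uverbs_file: drop the module reference taken at
 * open time (only held when the driver cannot disassociate), complete the
 * device if this was the last user, release the default async event file
 * and the device reference, and finally free the file.
 */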
195void ib_uverbs_release_file(struct kref *ref)
196{
197 struct ib_uverbs_file *file =
198 container_of(ref, struct ib_uverbs_file, ref);
199 struct ib_device *ib_dev;
200 int srcu_key;
201
202 release_ufile_idr_uobject(file);
203
204 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
205 ib_dev = srcu_dereference(file->device->ib_dev,
206 &file->device->disassociate_srcu);
207 if (ib_dev && !ib_dev->ops.disassociate_ucontext)
208 module_put(ib_dev->ops.owner);
209 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
210
211 if (refcount_dec_and_test(&file->device->refcount))
212 ib_uverbs_comp_dev(file->device);
213
214 if (file->default_async_file)
215 uverbs_uobject_put(&file->default_async_file->uobj);
216 put_device(&file->device->dev);
217
218 if (file->disassociate_page)
219 __free_pages(file->disassociate_page, 0);
220 mutex_destroy(&file->umap_lock);
221 mutex_destroy(&file->ucontext_lock);
222 kfree(file);
223}
224
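/*
 * Common read() body for async and completion event files: unless O_NONBLOCK
 * is set, sleep until an event is queued or the queue is closed, then copy a
 * single descriptor of 'eventsz' bytes to userspace and drop the event.
 */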
225static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
226 struct file *filp, char __user *buf,
227 size_t count, loff_t *pos,
228 size_t eventsz)
229{
230 struct ib_uverbs_event *event;
231 int ret = 0;
232
233 spin_lock_irq(&ev_queue->lock);
234
235 while (list_empty(&ev_queue->event_list)) {
236 if (ev_queue->is_closed) {
237 spin_unlock_irq(&ev_queue->lock);
238 return -EIO;
239 }
240
241 spin_unlock_irq(&ev_queue->lock);
242 if (filp->f_flags & O_NONBLOCK)
243 return -EAGAIN;
244
245 if (wait_event_interruptible(ev_queue->poll_wait,
246 (!list_empty(&ev_queue->event_list) ||
247 ev_queue->is_closed)))
248 return -ERESTARTSYS;
249
250 spin_lock_irq(&ev_queue->lock);
251 }
252
253 event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
254
255 if (eventsz > count) {
256 ret = -EINVAL;
257 event = NULL;
258 } else {
259 list_del(ev_queue->event_list.next);
260 if (event->counter) {
261 ++(*event->counter);
262 list_del(&event->obj_list);
263 }
264 }
265
266 spin_unlock_irq(&ev_queue->lock);
267
268 if (event) {
269 if (copy_to_user(buf, event, eventsz))
270 ret = -EFAULT;
271 else
272 ret = eventsz;
273 }
274
275 kfree(event);
276
277 return ret;
278}
279
280static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
281 size_t count, loff_t *pos)
282{
283 struct ib_uverbs_async_event_file *file = filp->private_data;
284
285 return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
286 sizeof(struct ib_uverbs_async_event_desc));
287}
288
289static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
290 size_t count, loff_t *pos)
291{
292 struct ib_uverbs_completion_event_file *comp_ev_file =
293 filp->private_data;
294
295 return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
296 pos,
297 sizeof(struct ib_uverbs_comp_event_desc));
298}
299
300static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
301 struct file *filp,
302 struct poll_table_struct *wait)
303{
304 __poll_t pollflags = 0;
305
306 poll_wait(filp, &ev_queue->poll_wait, wait);
307
308 spin_lock_irq(&ev_queue->lock);
309 if (!list_empty(&ev_queue->event_list))
310 pollflags = EPOLLIN | EPOLLRDNORM;
311 else if (ev_queue->is_closed)
312 pollflags = EPOLLERR;
313 spin_unlock_irq(&ev_queue->lock);
314
315 return pollflags;
316}
317
318static __poll_t ib_uverbs_async_event_poll(struct file *filp,
319 struct poll_table_struct *wait)
320{
321 struct ib_uverbs_async_event_file *file = filp->private_data;
322
323 return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
324}
325
326static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
327 struct poll_table_struct *wait)
328{
329 struct ib_uverbs_completion_event_file *comp_ev_file =
330 filp->private_data;
331
332 return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
333}
334
335static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
336{
337 struct ib_uverbs_async_event_file *file = filp->private_data;
338
339 return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
340}
341
342static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
343{
344 struct ib_uverbs_completion_event_file *comp_ev_file =
345 filp->private_data;
346
347 return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
348}
349
350const struct file_operations uverbs_event_fops = {
351 .owner = THIS_MODULE,
352 .read = ib_uverbs_comp_event_read,
353 .poll = ib_uverbs_comp_event_poll,
354 .release = uverbs_uobject_fd_release,
355 .fasync = ib_uverbs_comp_event_fasync,
356 .llseek = no_llseek,
357};
358
359const struct file_operations uverbs_async_event_fops = {
360 .owner = THIS_MODULE,
361 .read = ib_uverbs_async_event_read,
362 .poll = ib_uverbs_async_event_poll,
363 .release = uverbs_async_event_release,
364 .fasync = ib_uverbs_async_event_fasync,
365 .llseek = no_llseek,
366};
367
368void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
369{
370 struct ib_uverbs_event_queue *ev_queue = cq_context;
371 struct ib_ucq_object *uobj;
372 struct ib_uverbs_event *entry;
373 unsigned long flags;
374
375 if (!ev_queue)
376 return;
377
378 spin_lock_irqsave(&ev_queue->lock, flags);
379 if (ev_queue->is_closed) {
380 spin_unlock_irqrestore(&ev_queue->lock, flags);
381 return;
382 }
383
384 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
385 if (!entry) {
386 spin_unlock_irqrestore(&ev_queue->lock, flags);
387 return;
388 }
389
390 uobj = cq->uobject;
391
392 entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
393 entry->counter = &uobj->comp_events_reported;
394
395 list_add_tail(&entry->list, &ev_queue->event_list);
396 list_add_tail(&entry->obj_list, &uobj->comp_list);
397 spin_unlock_irqrestore(&ev_queue->lock, flags);
398
399 wake_up_interruptible(&ev_queue->poll_wait);
400 kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
401}
402
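/*
 * Queue one asynchronous event on the given async event file and wake up any
 * readers. When 'obj_list' and 'counter' are provided, the event is also
 * linked to the owning uobject so teardown can account for events already
 * reported to userspace.
 */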
403void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
404 __u64 element, __u64 event,
405 struct list_head *obj_list, u32 *counter)
406{
407 struct ib_uverbs_event *entry;
408 unsigned long flags;
409
410 if (!async_file)
411 return;
412
413 spin_lock_irqsave(&async_file->ev_queue.lock, flags);
414 if (async_file->ev_queue.is_closed) {
415 spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
416 return;
417 }
418
419 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
420 if (!entry) {
421 spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
422 return;
423 }
424
425 entry->desc.async.element = element;
426 entry->desc.async.event_type = event;
427 entry->desc.async.reserved = 0;
428 entry->counter = counter;
429
430 list_add_tail(&entry->list, &async_file->ev_queue.event_list);
431 if (obj_list)
432 list_add_tail(&entry->obj_list, obj_list);
433 spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
434
435 wake_up_interruptible(&async_file->ev_queue.poll_wait);
436 kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
437}
438
439static void uverbs_uobj_event(struct ib_uevent_object *eobj,
440 struct ib_event *event)
441{
442 ib_uverbs_async_handler(eobj->event_file,
443 eobj->uobject.user_handle, event->event,
444 &eobj->event_list, &eobj->events_reported);
445}
446
447void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
448{
449 uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
450}
451
452void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
453{
454	/* for XRC target QPs, check that the qp is live */
455 if (!event->element.qp->uobject)
456 return;
457
458 uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
459}
460
461void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
462{
463 uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
464}
465
466void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
467{
468 uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
469}
470
471static void ib_uverbs_event_handler(struct ib_event_handler *handler,
472 struct ib_event *event)
473{
474 ib_uverbs_async_handler(
475 container_of(handler, struct ib_uverbs_async_event_file,
476 event_handler),
477 event->element.port_num, event->event, NULL, NULL);
478}
479
480void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
481{
482 spin_lock_init(&ev_queue->lock);
483 INIT_LIST_HEAD(&ev_queue->event_list);
484 init_waitqueue_head(&ev_queue->poll_wait);
485 ev_queue->is_closed = 0;
486 ev_queue->async_queue = NULL;
487}
488
489void ib_uverbs_init_async_event_file(
490 struct ib_uverbs_async_event_file *async_file)
491{
492 struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
493 struct ib_device *ib_dev = async_file->uobj.context->device;
494
495 ib_uverbs_init_event_queue(&async_file->ev_queue);
496
497 /* The first async_event_file becomes the default one for the file. */
498 mutex_lock(&uverbs_file->ucontext_lock);
499 if (!uverbs_file->default_async_file) {
500 /* Pairs with the put in ib_uverbs_release_file */
501 uverbs_uobject_get(&async_file->uobj);
502 smp_store_release(&uverbs_file->default_async_file, async_file);
503 }
504 mutex_unlock(&uverbs_file->ucontext_lock);
505
506 INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
507 ib_uverbs_event_handler);
508 ib_register_event_handler(&async_file->event_handler);
509}
510
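/*
 * Validate the write() command header against the method description. Sizes
 * are expressed in 8-byte words for extended commands and in 4-byte words
 * (including the header itself) for legacy commands.
 */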
511static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
512 struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
513 const struct uverbs_api_write_method *method_elm)
514{
515 if (method_elm->is_ex) {
516 count -= sizeof(*hdr) + sizeof(*ex_hdr);
517
518 if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
519 return -EINVAL;
520
521 if (hdr->in_words * 8 < method_elm->req_size)
522 return -ENOSPC;
523
524 if (ex_hdr->cmd_hdr_reserved)
525 return -EINVAL;
526
527 if (ex_hdr->response) {
528 if (!hdr->out_words && !ex_hdr->provider_out_words)
529 return -EINVAL;
530
531 if (hdr->out_words * 8 < method_elm->resp_size)
532 return -ENOSPC;
533
534 if (!access_ok(u64_to_user_ptr(ex_hdr->response),
535 (hdr->out_words + ex_hdr->provider_out_words) * 8))
536 return -EFAULT;
537 } else {
538 if (hdr->out_words || ex_hdr->provider_out_words)
539 return -EINVAL;
540 }
541
542 return 0;
543 }
544
545 /* not extended command */
546 if (hdr->in_words * 4 != count)
547 return -EINVAL;
548
549 if (count < method_elm->req_size + sizeof(*hdr)) {
550 /*
551 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
552 * with a 16 byte write instead of 24. Old kernels didn't
553 * check the size so they allowed this. Now that the size is
554 * checked provide a compatibility work around to not break
555 * those userspaces.
556 */
557 if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
558 count == 16) {
559 hdr->in_words = 6;
560 return 0;
561 }
562 return -ENOSPC;
563 }
564 if (hdr->out_words * 4 < method_elm->resp_size)
565 return -ENOSPC;
566
567 return 0;
568}
569
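/*
 * write() entry point for the legacy command stream: parse the (possibly
 * extended) command header, build the uverbs_attr_bundle describing the core
 * and driver udata regions, and dispatch to the method handler.
 */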
570static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
571 size_t count, loff_t *pos)
572{
573 struct ib_uverbs_file *file = filp->private_data;
574 const struct uverbs_api_write_method *method_elm;
575 struct uverbs_api *uapi = file->device->uapi;
576 struct ib_uverbs_ex_cmd_hdr ex_hdr;
577 struct ib_uverbs_cmd_hdr hdr;
578 struct uverbs_attr_bundle bundle;
579 int srcu_key;
580 ssize_t ret;
581
582 if (!ib_safe_file_access(filp)) {
583 pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
584 task_tgid_vnr(current), current->comm);
585 return -EACCES;
586 }
587
588 if (count < sizeof(hdr))
589 return -EINVAL;
590
591 if (copy_from_user(&hdr, buf, sizeof(hdr)))
592 return -EFAULT;
593
594 method_elm = uapi_get_method(uapi, hdr.command);
595 if (IS_ERR(method_elm))
596 return PTR_ERR(method_elm);
597
598 if (method_elm->is_ex) {
599 if (count < (sizeof(hdr) + sizeof(ex_hdr)))
600 return -EINVAL;
601 if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
602 return -EFAULT;
603 }
604
605 ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
606 if (ret)
607 return ret;
608
609 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
610
611 buf += sizeof(hdr);
612
613 memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
614 bundle.ufile = file;
615 bundle.context = NULL; /* only valid if bundle has uobject */
616 bundle.uobject = NULL;
617 if (!method_elm->is_ex) {
618 size_t in_len = hdr.in_words * 4 - sizeof(hdr);
619 size_t out_len = hdr.out_words * 4;
620 u64 response = 0;
621
622 if (method_elm->has_udata) {
623 bundle.driver_udata.inlen =
624 in_len - method_elm->req_size;
625 in_len = method_elm->req_size;
626 if (bundle.driver_udata.inlen)
627 bundle.driver_udata.inbuf = buf + in_len;
628 else
629 bundle.driver_udata.inbuf = NULL;
630 } else {
631 memset(&bundle.driver_udata, 0,
632 sizeof(bundle.driver_udata));
633 }
634
635 if (method_elm->has_resp) {
636 /*
637 * The macros check that if has_resp is set
638 * then the command request structure starts
639 * with a '__aligned u64 response' member.
640 */
641 ret = get_user(response, (const u64 __user *)buf);
642 if (ret)
643 goto out_unlock;
644
645 if (method_elm->has_udata) {
646 bundle.driver_udata.outlen =
647 out_len - method_elm->resp_size;
648 out_len = method_elm->resp_size;
649 if (bundle.driver_udata.outlen)
650 bundle.driver_udata.outbuf =
651 u64_to_user_ptr(response +
652 out_len);
653 else
654 bundle.driver_udata.outbuf = NULL;
655 }
656 } else {
657 bundle.driver_udata.outlen = 0;
658 bundle.driver_udata.outbuf = NULL;
659 }
660
661 ib_uverbs_init_udata_buf_or_null(
662 &bundle.ucore, buf, u64_to_user_ptr(response),
663 in_len, out_len);
664 } else {
665 buf += sizeof(ex_hdr);
666
667 ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
668 u64_to_user_ptr(ex_hdr.response),
669 hdr.in_words * 8, hdr.out_words * 8);
670
671 ib_uverbs_init_udata_buf_or_null(
672 &bundle.driver_udata, buf + bundle.ucore.inlen,
673 u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
674 ex_hdr.provider_in_words * 8,
675 ex_hdr.provider_out_words * 8);
676
677 }
678
679 ret = method_elm->handler(&bundle);
680 if (bundle.uobject)
681 uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true,
682 !ret, &bundle);
683out_unlock:
684 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
685 return (ret) ? : count;
686}
687
688static const struct vm_operations_struct rdma_umap_ops;
689
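/*
 * mmap() on the uverbs char device is forwarded to the driver's mmap handler
 * under the disassociation SRCU; rdma_umap_ops lets the core track and later
 * zap these mappings when the device is disassociated.
 */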
690static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
691{
692 struct ib_uverbs_file *file = filp->private_data;
693 struct ib_ucontext *ucontext;
694 int ret = 0;
695 int srcu_key;
696
697 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
698 ucontext = ib_uverbs_get_ucontext_file(file);
699 if (IS_ERR(ucontext)) {
700 ret = PTR_ERR(ucontext);
701 goto out;
702 }
703 vma->vm_ops = &rdma_umap_ops;
704 ret = ucontext->device->ops.mmap(ucontext, vma);
705out:
706 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
707 return ret;
708}
709
710/*
711 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
712 * struct
713 */
714static void rdma_umap_open(struct vm_area_struct *vma)
715{
716 struct ib_uverbs_file *ufile = vma->vm_file->private_data;
717 struct rdma_umap_priv *opriv = vma->vm_private_data;
718 struct rdma_umap_priv *priv;
719
720 if (!opriv)
721 return;
722
723 /* We are racing with disassociation */
724 if (!down_read_trylock(&ufile->hw_destroy_rwsem))
725 goto out_zap;
726 /*
727 * Disassociation already completed, the VMA should already be zapped.
728 */
729 if (!ufile->ucontext)
730 goto out_unlock;
731
732 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
733 if (!priv)
734 goto out_unlock;
735 rdma_umap_priv_init(priv, vma, opriv->entry);
736
737 up_read(&ufile->hw_destroy_rwsem);
738 return;
739
740out_unlock:
741 up_read(&ufile->hw_destroy_rwsem);
742out_zap:
743 /*
744	 * We can't allow the VMA to be created with the actual IO pages; that
745	 * would break our API contract, and it can't be stopped at this
746	 * point, so zap it.
747 */
748 vma->vm_private_data = NULL;
749 zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
750}
751
752static void rdma_umap_close(struct vm_area_struct *vma)
753{
754 struct ib_uverbs_file *ufile = vma->vm_file->private_data;
755 struct rdma_umap_priv *priv = vma->vm_private_data;
756
757 if (!priv)
758 return;
759
760 /*
761 * The vma holds a reference on the struct file that created it, which
762 * in turn means that the ib_uverbs_file is guaranteed to exist at
763 * this point.
764 */
765 mutex_lock(&ufile->umap_lock);
766 if (priv->entry)
767 rdma_user_mmap_entry_put(priv->entry);
768
769 list_del(&priv->list);
770 mutex_unlock(&ufile->umap_lock);
771 kfree(priv);
772}
773
774/*
775 * Once zap_vma_ptes() has been called, faults on the VMA will come here and
776 * we return a dummy writable zero page for all the pfns.
777 */
778static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
779{
780 struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
781 struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
782 vm_fault_t ret = 0;
783
784 if (!priv)
785 return VM_FAULT_SIGBUS;
786
787 /* Read only pages can just use the system zero page. */
788 if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
789 vmf->page = ZERO_PAGE(vmf->address);
790 get_page(vmf->page);
791 return 0;
792 }
793
794 mutex_lock(&ufile->umap_lock);
795 if (!ufile->disassociate_page)
796 ufile->disassociate_page =
797 alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
798
799 if (ufile->disassociate_page) {
800 /*
801 * This VMA is forced to always be shared so this doesn't have
802 * to worry about COW.
803 */
804 vmf->page = ufile->disassociate_page;
805 get_page(vmf->page);
806 } else {
807 ret = VM_FAULT_SIGBUS;
808 }
809 mutex_unlock(&ufile->umap_lock);
810
811 return ret;
812}
813
814static const struct vm_operations_struct rdma_umap_ops = {
815 .open = rdma_umap_open,
816 .close = rdma_umap_close,
817 .fault = rdma_umap_fault,
818};
819
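/*
 * Zap every userspace mapping tracked for this ufile so that subsequent
 * accesses fault into rdma_umap_fault() instead of touching the device.
 * Called with hw_destroy_rwsem held during disassociation.
 */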
820void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
821{
822 struct rdma_umap_priv *priv, *next_priv;
823
824 lockdep_assert_held(&ufile->hw_destroy_rwsem);
825
826 while (1) {
827 struct mm_struct *mm = NULL;
828
829 /* Get an arbitrary mm pointer that hasn't been cleaned yet */
830 mutex_lock(&ufile->umap_lock);
831 while (!list_empty(&ufile->umaps)) {
832 int ret;
833
834 priv = list_first_entry(&ufile->umaps,
835 struct rdma_umap_priv, list);
836 mm = priv->vma->vm_mm;
837 ret = mmget_not_zero(mm);
838 if (!ret) {
839 list_del_init(&priv->list);
840 if (priv->entry) {
841 rdma_user_mmap_entry_put(priv->entry);
842 priv->entry = NULL;
843 }
844 mm = NULL;
845 continue;
846 }
847 break;
848 }
849 mutex_unlock(&ufile->umap_lock);
850 if (!mm)
851 return;
852
853 /*
854		 * The umap_lock is nested under mmap_lock since it is used within
855 * the vma_ops callbacks, so we have to clean the list one mm
856 * at a time to get the lock ordering right. Typically there
857 * will only be one mm, so no big deal.
858 */
859 mmap_read_lock(mm);
860 mutex_lock(&ufile->umap_lock);
861 list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
862 list) {
863 struct vm_area_struct *vma = priv->vma;
864
865 if (vma->vm_mm != mm)
866 continue;
867 list_del_init(&priv->list);
868
869 zap_vma_ptes(vma, vma->vm_start,
870 vma->vm_end - vma->vm_start);
871
872 if (priv->entry) {
873 rdma_user_mmap_entry_put(priv->entry);
874 priv->entry = NULL;
875 }
876 }
877 mutex_unlock(&ufile->umap_lock);
878 mmap_read_unlock(mm);
879 mmput(mm);
880 }
881}
882
883/*
884 * ib_uverbs_open() does not need the BKL:
885 *
886 * - the ib_uverbs_device structures are properly reference counted and
887 * everything else is purely local to the file being created, so
888 * races against other open calls are not a problem;
889 * - there is no ioctl method to race against;
890 *  - the open method will either fail immediately with -ENXIO, or all
891 *    the required initialization will be done.
892 */
893static int ib_uverbs_open(struct inode *inode, struct file *filp)
894{
895 struct ib_uverbs_device *dev;
896 struct ib_uverbs_file *file;
897 struct ib_device *ib_dev;
898 int ret;
899 int module_dependent;
900 int srcu_key;
901
902 dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
903 if (!refcount_inc_not_zero(&dev->refcount))
904 return -ENXIO;
905
906 get_device(&dev->dev);
907 srcu_key = srcu_read_lock(&dev->disassociate_srcu);
908 mutex_lock(&dev->lists_mutex);
909 ib_dev = srcu_dereference(dev->ib_dev,
910 &dev->disassociate_srcu);
911 if (!ib_dev) {
912 ret = -EIO;
913 goto err;
914 }
915
916 if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
917 ret = -EPERM;
918 goto err;
919 }
920
921	/* If the IB device supports disassociating the ucontext, there is no hard
922	 * module dependency between the uverbs device and its low-level device.
923 */
924 module_dependent = !(ib_dev->ops.disassociate_ucontext);
925
926 if (module_dependent) {
927 if (!try_module_get(ib_dev->ops.owner)) {
928 ret = -ENODEV;
929 goto err;
930 }
931 }
932
933 file = kzalloc(sizeof(*file), GFP_KERNEL);
934 if (!file) {
935 ret = -ENOMEM;
936 if (module_dependent)
937 goto err_module;
938
939 goto err;
940 }
941
942 file->device = dev;
943 kref_init(&file->ref);
944 mutex_init(&file->ucontext_lock);
945
946 spin_lock_init(&file->uobjects_lock);
947 INIT_LIST_HEAD(&file->uobjects);
948 init_rwsem(&file->hw_destroy_rwsem);
949 mutex_init(&file->umap_lock);
950 INIT_LIST_HEAD(&file->umaps);
951
952 filp->private_data = file;
953 list_add_tail(&file->list, &dev->uverbs_file_list);
954 mutex_unlock(&dev->lists_mutex);
955 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
956
957 setup_ufile_idr_uobject(file);
958
959 return stream_open(inode, filp);
960
961err_module:
962 module_put(ib_dev->ops.owner);
963
964err:
965 mutex_unlock(&dev->lists_mutex);
966 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
967 if (refcount_dec_and_test(&dev->refcount))
968 ib_uverbs_comp_dev(dev);
969
970 put_device(&dev->dev);
971 return ret;
972}
973
974static int ib_uverbs_close(struct inode *inode, struct file *filp)
975{
976 struct ib_uverbs_file *file = filp->private_data;
977
978 uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
979
980 mutex_lock(&file->device->lists_mutex);
981 list_del_init(&file->list);
982 mutex_unlock(&file->device->lists_mutex);
983
984 kref_put(&file->ref, ib_uverbs_release_file);
985
986 return 0;
987}
988
989static const struct file_operations uverbs_fops = {
990 .owner = THIS_MODULE,
991 .write = ib_uverbs_write,
992 .open = ib_uverbs_open,
993 .release = ib_uverbs_close,
994 .llseek = no_llseek,
995 .unlocked_ioctl = ib_uverbs_ioctl,
996 .compat_ioctl = compat_ptr_ioctl,
997};
998
999static const struct file_operations uverbs_mmap_fops = {
1000 .owner = THIS_MODULE,
1001 .write = ib_uverbs_write,
1002 .mmap = ib_uverbs_mmap,
1003 .open = ib_uverbs_open,
1004 .release = ib_uverbs_close,
1005 .llseek = no_llseek,
1006 .unlocked_ioctl = ib_uverbs_ioctl,
1007 .compat_ioctl = compat_ptr_ioctl,
1008};
1009
1010static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
1011 struct ib_client_nl_info *res)
1012{
1013 struct ib_uverbs_device *uverbs_dev = client_data;
1014 int ret;
1015
1016 if (res->port != -1)
1017 return -EINVAL;
1018
1019 res->abi = ibdev->ops.uverbs_abi_ver;
1020 res->cdev = &uverbs_dev->dev;
1021
1022 /*
1023	 * To support DRIVER_ID binding in userspace, some of the drivers need
1024 * upgrading to expose their PCI dependent revision information
1025 * through get_context instead of relying on modalias matching. When
1026 * the drivers are fixed they can drop this flag.
1027 */
1028 if (!ibdev->ops.uverbs_no_driver_id_binding) {
1029 ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
1030 ibdev->ops.driver_id);
1031 if (ret)
1032 return ret;
1033 }
1034 return 0;
1035}
1036
1037static struct ib_client uverbs_client = {
1038 .name = "uverbs",
1039 .no_kverbs_req = true,
1040 .add = ib_uverbs_add_one,
1041 .remove = ib_uverbs_remove_one,
1042 .get_nl_info = ib_uverbs_get_nl_info,
1043};
1044MODULE_ALIAS_RDMA_CLIENT("uverbs");
1045
1046static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
1047 char *buf)
1048{
1049 struct ib_uverbs_device *dev =
1050 container_of(device, struct ib_uverbs_device, dev);
1051 int ret = -ENODEV;
1052 int srcu_key;
1053 struct ib_device *ib_dev;
1054
1055 srcu_key = srcu_read_lock(&dev->disassociate_srcu);
1056 ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
1057 if (ib_dev)
1058 ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev));
1059 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1060
1061 return ret;
1062}
1063static DEVICE_ATTR_RO(ibdev);
1064
1065static ssize_t abi_version_show(struct device *device,
1066 struct device_attribute *attr, char *buf)
1067{
1068 struct ib_uverbs_device *dev =
1069 container_of(device, struct ib_uverbs_device, dev);
1070 int ret = -ENODEV;
1071 int srcu_key;
1072 struct ib_device *ib_dev;
1073
1074 srcu_key = srcu_read_lock(&dev->disassociate_srcu);
1075 ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
1076 if (ib_dev)
1077 ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
1078 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1079
1080 return ret;
1081}
1082static DEVICE_ATTR_RO(abi_version);
1083
1084static struct attribute *ib_dev_attrs[] = {
1085 &dev_attr_abi_version.attr,
1086 &dev_attr_ibdev.attr,
1087 NULL,
1088};
1089
1090static const struct attribute_group dev_attr_group = {
1091 .attrs = ib_dev_attrs,
1092};
1093
1094static CLASS_ATTR_STRING(abi_version, S_IRUGO,
1095 __stringify(IB_USER_VERBS_ABI_VERSION));
1096
1097static int ib_uverbs_create_uapi(struct ib_device *device,
1098 struct ib_uverbs_device *uverbs_dev)
1099{
1100 struct uverbs_api *uapi;
1101
1102 uapi = uverbs_alloc_api(device);
1103 if (IS_ERR(uapi))
1104 return PTR_ERR(uapi);
1105
1106 uverbs_dev->uapi = uapi;
1107 return 0;
1108}
1109
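/*
 * Client ->add callback: allocate an ib_uverbs_device for the new IB device,
 * pick a minor number (fixed region first, then the dynamically allocated
 * range), build the uapi description and register the uverbsN char device.
 */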
1110static int ib_uverbs_add_one(struct ib_device *device)
1111{
1112 int devnum;
1113 dev_t base;
1114 struct ib_uverbs_device *uverbs_dev;
1115 int ret;
1116
1117 if (!device->ops.alloc_ucontext)
1118 return -EOPNOTSUPP;
1119
1120 uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
1121 if (!uverbs_dev)
1122 return -ENOMEM;
1123
1124 ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
1125 if (ret) {
1126 kfree(uverbs_dev);
1127 return -ENOMEM;
1128 }
1129
1130 device_initialize(&uverbs_dev->dev);
1131 uverbs_dev->dev.class = &uverbs_class;
1132 uverbs_dev->dev.parent = device->dev.parent;
1133 uverbs_dev->dev.release = ib_uverbs_release_dev;
1134 uverbs_dev->groups[0] = &dev_attr_group;
1135 uverbs_dev->dev.groups = uverbs_dev->groups;
1136 refcount_set(&uverbs_dev->refcount, 1);
1137 init_completion(&uverbs_dev->comp);
1138 uverbs_dev->xrcd_tree = RB_ROOT;
1139 mutex_init(&uverbs_dev->xrcd_tree_mutex);
1140 mutex_init(&uverbs_dev->lists_mutex);
1141 INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
1142 rcu_assign_pointer(uverbs_dev->ib_dev, device);
1143 uverbs_dev->num_comp_vectors = device->num_comp_vectors;
1144
1145 devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
1146 GFP_KERNEL);
1147 if (devnum < 0) {
1148 ret = -ENOMEM;
1149 goto err;
1150 }
1151 uverbs_dev->devnum = devnum;
1152 if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
1153 base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
1154 else
1155 base = IB_UVERBS_BASE_DEV + devnum;
1156
1157 ret = ib_uverbs_create_uapi(device, uverbs_dev);
1158 if (ret)
1159 goto err_uapi;
1160
1161 uverbs_dev->dev.devt = base;
1162 dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
1163
1164 cdev_init(&uverbs_dev->cdev,
1165 device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
1166 uverbs_dev->cdev.owner = THIS_MODULE;
1167
1168 ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
1169 if (ret)
1170 goto err_uapi;
1171
1172 ib_set_client_data(device, &uverbs_client, uverbs_dev);
1173 return 0;
1174
1175err_uapi:
1176 ida_free(&uverbs_ida, devnum);
1177err:
1178 if (refcount_dec_and_test(&uverbs_dev->refcount))
1179 ib_uverbs_comp_dev(uverbs_dev);
1180 wait_for_completion(&uverbs_dev->comp);
1181 put_device(&uverbs_dev->dev);
1182 return ret;
1183}
1184
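/*
 * Disassociate path: force all open files belonging to this uverbs device to
 * release their HW resources so the ib_device can go away without waiting
 * for userspace to close its file descriptors.
 */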
1185static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
1186 struct ib_device *ib_dev)
1187{
1188 struct ib_uverbs_file *file;
1189
1190 /* Pending running commands to terminate */
1191 uverbs_disassociate_api_pre(uverbs_dev);
1192
1193 mutex_lock(&uverbs_dev->lists_mutex);
1194 while (!list_empty(&uverbs_dev->uverbs_file_list)) {
1195 file = list_first_entry(&uverbs_dev->uverbs_file_list,
1196 struct ib_uverbs_file, list);
1197 list_del_init(&file->list);
1198 kref_get(&file->ref);
1199
1200		/* We must release the mutex before going ahead and calling
1201		 * uverbs_destroy_ufile_hw(), as it might end up indirectly
1202		 * calling ib_uverbs_close(), for example due to freeing the
1203		 * resources (e.g. mmput).
1204 */
1205 mutex_unlock(&uverbs_dev->lists_mutex);
1206
1207 uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
1208 kref_put(&file->ref, ib_uverbs_release_file);
1209
1210 mutex_lock(&uverbs_dev->lists_mutex);
1211 }
1212 mutex_unlock(&uverbs_dev->lists_mutex);
1213
1214 uverbs_disassociate_api(uverbs_dev->uapi);
1215}
1216
1217static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
1218{
1219 struct ib_uverbs_device *uverbs_dev = client_data;
1220 int wait_clients = 1;
1221
1222 cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
1223 ida_free(&uverbs_ida, uverbs_dev->devnum);
1224
1225 if (device->ops.disassociate_ucontext) {
1226 /* We disassociate HW resources and immediately return.
1227		 * Userspace will see an EIO errno for all future access.
1228 * Upon returning, ib_device may be freed internally and is not
1229 * valid any more.
1230 * uverbs_device is still available until all clients close
1231 * their files, then the uverbs device ref count will be zero
1232 * and its resources will be freed.
1233 * Note: At this point no more files can be opened since the
1234 * cdev was deleted, however active clients can still issue
1235 * commands and close their open files.
1236 */
1237 ib_uverbs_free_hw_resources(uverbs_dev, device);
1238 wait_clients = 0;
1239 }
1240
1241 if (refcount_dec_and_test(&uverbs_dev->refcount))
1242 ib_uverbs_comp_dev(uverbs_dev);
1243 if (wait_clients)
1244 wait_for_completion(&uverbs_dev->comp);
1245
1246 put_device(&uverbs_dev->dev);
1247}
1248
1249static int __init ib_uverbs_init(void)
1250{
1251 int ret;
1252
1253 ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
1254 IB_UVERBS_NUM_FIXED_MINOR,
1255 "infiniband_verbs");
1256 if (ret) {
1257 pr_err("user_verbs: couldn't register device number\n");
1258 goto out;
1259 }
1260
1261 ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
1262 IB_UVERBS_NUM_DYNAMIC_MINOR,
1263 "infiniband_verbs");
1264 if (ret) {
1265 pr_err("couldn't register dynamic device number\n");
1266 goto out_alloc;
1267 }
1268
1269 ret = class_register(&uverbs_class);
1270 if (ret) {
1271 pr_err("user_verbs: couldn't create class infiniband_verbs\n");
1272 goto out_chrdev;
1273 }
1274
1275 ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
1276 if (ret) {
1277 pr_err("user_verbs: couldn't create abi_version attribute\n");
1278 goto out_class;
1279 }
1280
1281 ret = ib_register_client(&uverbs_client);
1282 if (ret) {
1283 pr_err("user_verbs: couldn't register client\n");
1284 goto out_class;
1285 }
1286
1287 return 0;
1288
1289out_class:
1290 class_unregister(&uverbs_class);
1291
1292out_chrdev:
1293 unregister_chrdev_region(dynamic_uverbs_dev,
1294 IB_UVERBS_NUM_DYNAMIC_MINOR);
1295
1296out_alloc:
1297 unregister_chrdev_region(IB_UVERBS_BASE_DEV,
1298 IB_UVERBS_NUM_FIXED_MINOR);
1299
1300out:
1301 return ret;
1302}
1303
1304static void __exit ib_uverbs_cleanup(void)
1305{
1306 ib_unregister_client(&uverbs_client);
1307 class_unregister(&uverbs_class);
1308 unregister_chrdev_region(IB_UVERBS_BASE_DEV,
1309 IB_UVERBS_NUM_FIXED_MINOR);
1310 unregister_chrdev_region(dynamic_uverbs_dev,
1311 IB_UVERBS_NUM_DYNAMIC_MINOR);
1312 mmu_notifier_synchronize();
1313}
1314
1315module_init(ib_uverbs_init);
1316module_exit(ib_uverbs_cleanup);
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/device.h>
40#include <linux/err.h>
41#include <linux/fs.h>
42#include <linux/poll.h>
43#include <linux/sched.h>
44#include <linux/file.h>
45#include <linux/cdev.h>
46#include <linux/anon_inodes.h>
47#include <linux/slab.h>
48
49#include <linux/uaccess.h>
50
51#include <rdma/ib.h>
52
53#include "uverbs.h"
54
55MODULE_AUTHOR("Roland Dreier");
56MODULE_DESCRIPTION("InfiniBand userspace verbs access");
57MODULE_LICENSE("Dual BSD/GPL");
58
59enum {
60 IB_UVERBS_MAJOR = 231,
61 IB_UVERBS_BASE_MINOR = 192,
62 IB_UVERBS_MAX_DEVICES = 32
63};
64
65#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
66
67static struct class *uverbs_class;
68
69DEFINE_SPINLOCK(ib_uverbs_idr_lock);
70DEFINE_IDR(ib_uverbs_pd_idr);
71DEFINE_IDR(ib_uverbs_mr_idr);
72DEFINE_IDR(ib_uverbs_mw_idr);
73DEFINE_IDR(ib_uverbs_ah_idr);
74DEFINE_IDR(ib_uverbs_cq_idr);
75DEFINE_IDR(ib_uverbs_qp_idr);
76DEFINE_IDR(ib_uverbs_srq_idr);
77DEFINE_IDR(ib_uverbs_xrcd_idr);
78DEFINE_IDR(ib_uverbs_rule_idr);
79DEFINE_IDR(ib_uverbs_wq_idr);
80DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);
81
82static DEFINE_SPINLOCK(map_lock);
83static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
84
85static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
86 struct ib_device *ib_dev,
87 const char __user *buf, int in_len,
88 int out_len) = {
89 [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
90 [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
91 [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
92 [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
93 [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
94 [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
95 [IB_USER_VERBS_CMD_REREG_MR] = ib_uverbs_rereg_mr,
96 [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
97 [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw,
98 [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw,
99 [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
100 [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
101 [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
102 [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
103 [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
104 [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
105 [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
106 [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
107 [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
108 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
109 [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
110 [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
111 [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
112 [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
113 [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
114 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
115 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
116 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
117 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
118 [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
119 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
120 [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd,
121 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
122 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
123 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
124};
125
126static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
127 struct ib_device *ib_dev,
128 struct ib_udata *ucore,
129 struct ib_udata *uhw) = {
130 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
131 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
132 [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
133 [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq,
134 [IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp,
135 [IB_USER_VERBS_EX_CMD_CREATE_WQ] = ib_uverbs_ex_create_wq,
136 [IB_USER_VERBS_EX_CMD_MODIFY_WQ] = ib_uverbs_ex_modify_wq,
137 [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq,
138 [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
139 [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
140 [IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
141};
142
143static void ib_uverbs_add_one(struct ib_device *device);
144static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
145
146int uverbs_dealloc_mw(struct ib_mw *mw)
147{
148 struct ib_pd *pd = mw->pd;
149 int ret;
150
151 ret = mw->device->dealloc_mw(mw);
152 if (!ret)
153 atomic_dec(&pd->usecnt);
154 return ret;
155}
156
157static void ib_uverbs_release_dev(struct kobject *kobj)
158{
159 struct ib_uverbs_device *dev =
160 container_of(kobj, struct ib_uverbs_device, kobj);
161
162 cleanup_srcu_struct(&dev->disassociate_srcu);
163 kfree(dev);
164}
165
166static struct kobj_type ib_uverbs_dev_ktype = {
167 .release = ib_uverbs_release_dev,
168};
169
170static void ib_uverbs_release_event_file(struct kref *ref)
171{
172 struct ib_uverbs_event_file *file =
173 container_of(ref, struct ib_uverbs_event_file, ref);
174
175 kfree(file);
176}
177
178void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
179 struct ib_uverbs_event_file *ev_file,
180 struct ib_ucq_object *uobj)
181{
182 struct ib_uverbs_event *evt, *tmp;
183
184 if (ev_file) {
185 spin_lock_irq(&ev_file->lock);
186 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
187 list_del(&evt->list);
188 kfree(evt);
189 }
190 spin_unlock_irq(&ev_file->lock);
191
192 kref_put(&ev_file->ref, ib_uverbs_release_event_file);
193 }
194
195 spin_lock_irq(&file->async_file->lock);
196 list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
197 list_del(&evt->list);
198 kfree(evt);
199 }
200 spin_unlock_irq(&file->async_file->lock);
201}
202
203void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
204 struct ib_uevent_object *uobj)
205{
206 struct ib_uverbs_event *evt, *tmp;
207
208 spin_lock_irq(&file->async_file->lock);
209 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
210 list_del(&evt->list);
211 kfree(evt);
212 }
213 spin_unlock_irq(&file->async_file->lock);
214}
215
216static void ib_uverbs_detach_umcast(struct ib_qp *qp,
217 struct ib_uqp_object *uobj)
218{
219 struct ib_uverbs_mcast_entry *mcast, *tmp;
220
221 list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
222 ib_detach_mcast(qp, &mcast->gid, mcast->lid);
223 list_del(&mcast->list);
224 kfree(mcast);
225 }
226}
227
228static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
229 struct ib_ucontext *context)
230{
231 struct ib_uobject *uobj, *tmp;
232
233 context->closing = 1;
234
235 list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
236 struct ib_ah *ah = uobj->object;
237
238 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
239 ib_destroy_ah(ah);
240 kfree(uobj);
241 }
242
243 /* Remove MWs before QPs, in order to support type 2A MWs. */
244 list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
245 struct ib_mw *mw = uobj->object;
246
247 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
248 uverbs_dealloc_mw(mw);
249 kfree(uobj);
250 }
251
252 list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
253 struct ib_flow *flow_id = uobj->object;
254
255 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
256 ib_destroy_flow(flow_id);
257 kfree(uobj);
258 }
259
260 list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
261 struct ib_qp *qp = uobj->object;
262 struct ib_uqp_object *uqp =
263 container_of(uobj, struct ib_uqp_object, uevent.uobject);
264
265 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
266 if (qp == qp->real_qp)
267 ib_uverbs_detach_umcast(qp, uqp);
268 ib_destroy_qp(qp);
269 ib_uverbs_release_uevent(file, &uqp->uevent);
270 kfree(uqp);
271 }
272
273 list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
274 struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
275 struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
276
277 idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
278 ib_destroy_rwq_ind_table(rwq_ind_tbl);
279 kfree(ind_tbl);
280 kfree(uobj);
281 }
282
283 list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
284 struct ib_wq *wq = uobj->object;
285 struct ib_uwq_object *uwq =
286 container_of(uobj, struct ib_uwq_object, uevent.uobject);
287
288 idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
289 ib_destroy_wq(wq);
290 ib_uverbs_release_uevent(file, &uwq->uevent);
291 kfree(uwq);
292 }
293
294 list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
295 struct ib_srq *srq = uobj->object;
296 struct ib_uevent_object *uevent =
297 container_of(uobj, struct ib_uevent_object, uobject);
298
299 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
300 ib_destroy_srq(srq);
301 ib_uverbs_release_uevent(file, uevent);
302 kfree(uevent);
303 }
304
305 list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
306 struct ib_cq *cq = uobj->object;
307 struct ib_uverbs_event_file *ev_file = cq->cq_context;
308 struct ib_ucq_object *ucq =
309 container_of(uobj, struct ib_ucq_object, uobject);
310
311 idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
312 ib_destroy_cq(cq);
313 ib_uverbs_release_ucq(file, ev_file, ucq);
314 kfree(ucq);
315 }
316
317 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
318 struct ib_mr *mr = uobj->object;
319
320 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
321 ib_dereg_mr(mr);
322 kfree(uobj);
323 }
324
325 mutex_lock(&file->device->xrcd_tree_mutex);
326 list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
327 struct ib_xrcd *xrcd = uobj->object;
328 struct ib_uxrcd_object *uxrcd =
329 container_of(uobj, struct ib_uxrcd_object, uobject);
330
331 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
332 ib_uverbs_dealloc_xrcd(file->device, xrcd);
333 kfree(uxrcd);
334 }
335 mutex_unlock(&file->device->xrcd_tree_mutex);
336
337 list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
338 struct ib_pd *pd = uobj->object;
339
340 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
341 ib_dealloc_pd(pd);
342 kfree(uobj);
343 }
344
345 put_pid(context->tgid);
346
347 return context->device->dealloc_ucontext(context);
348}
349
350static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
351{
352 complete(&dev->comp);
353}
354
355static void ib_uverbs_release_file(struct kref *ref)
356{
357 struct ib_uverbs_file *file =
358 container_of(ref, struct ib_uverbs_file, ref);
359 struct ib_device *ib_dev;
360 int srcu_key;
361
362 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
363 ib_dev = srcu_dereference(file->device->ib_dev,
364 &file->device->disassociate_srcu);
365 if (ib_dev && !ib_dev->disassociate_ucontext)
366 module_put(ib_dev->owner);
367 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
368
369 if (atomic_dec_and_test(&file->device->refcount))
370 ib_uverbs_comp_dev(file->device);
371
372 kfree(file);
373}
374
375static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
376 size_t count, loff_t *pos)
377{
378 struct ib_uverbs_event_file *file = filp->private_data;
379 struct ib_uverbs_event *event;
380 int eventsz;
381 int ret = 0;
382
383 spin_lock_irq(&file->lock);
384
385 while (list_empty(&file->event_list)) {
386 spin_unlock_irq(&file->lock);
387
388 if (filp->f_flags & O_NONBLOCK)
389 return -EAGAIN;
390
391 if (wait_event_interruptible(file->poll_wait,
392 (!list_empty(&file->event_list) ||
393 /* The barriers built into wait_event_interruptible()
394 * and wake_up() guarentee this will see the null set
395 * without using RCU
396 */
397 !file->uverbs_file->device->ib_dev)))
398 return -ERESTARTSYS;
399
400 /* If device was disassociated and no event exists set an error */
401 if (list_empty(&file->event_list) &&
402 !file->uverbs_file->device->ib_dev)
403 return -EIO;
404
405 spin_lock_irq(&file->lock);
406 }
407
408 event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
409
410 if (file->is_async)
411 eventsz = sizeof (struct ib_uverbs_async_event_desc);
412 else
413 eventsz = sizeof (struct ib_uverbs_comp_event_desc);
414
415 if (eventsz > count) {
416 ret = -EINVAL;
417 event = NULL;
418 } else {
419 list_del(file->event_list.next);
420 if (event->counter) {
421 ++(*event->counter);
422 list_del(&event->obj_list);
423 }
424 }
425
426 spin_unlock_irq(&file->lock);
427
428 if (event) {
429 if (copy_to_user(buf, event, eventsz))
430 ret = -EFAULT;
431 else
432 ret = eventsz;
433 }
434
435 kfree(event);
436
437 return ret;
438}
439
440static unsigned int ib_uverbs_event_poll(struct file *filp,
441 struct poll_table_struct *wait)
442{
443 unsigned int pollflags = 0;
444 struct ib_uverbs_event_file *file = filp->private_data;
445
446 poll_wait(filp, &file->poll_wait, wait);
447
448 spin_lock_irq(&file->lock);
449 if (!list_empty(&file->event_list))
450 pollflags = POLLIN | POLLRDNORM;
451 spin_unlock_irq(&file->lock);
452
453 return pollflags;
454}
455
456static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
457{
458 struct ib_uverbs_event_file *file = filp->private_data;
459
460 return fasync_helper(fd, filp, on, &file->async_queue);
461}
462
463static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
464{
465 struct ib_uverbs_event_file *file = filp->private_data;
466 struct ib_uverbs_event *entry, *tmp;
467 int closed_already = 0;
468
469 mutex_lock(&file->uverbs_file->device->lists_mutex);
470 spin_lock_irq(&file->lock);
471 closed_already = file->is_closed;
472 file->is_closed = 1;
473 list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
474 if (entry->counter)
475 list_del(&entry->obj_list);
476 kfree(entry);
477 }
478 spin_unlock_irq(&file->lock);
479 if (!closed_already) {
480 list_del(&file->list);
481 if (file->is_async)
482 ib_unregister_event_handler(&file->uverbs_file->
483 event_handler);
484 }
485 mutex_unlock(&file->uverbs_file->device->lists_mutex);
486
487 kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
488 kref_put(&file->ref, ib_uverbs_release_event_file);
489
490 return 0;
491}
492
493static const struct file_operations uverbs_event_fops = {
494 .owner = THIS_MODULE,
495 .read = ib_uverbs_event_read,
496 .poll = ib_uverbs_event_poll,
497 .release = ib_uverbs_event_close,
498 .fasync = ib_uverbs_event_fasync,
499 .llseek = no_llseek,
500};
501
502void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
503{
504 struct ib_uverbs_event_file *file = cq_context;
505 struct ib_ucq_object *uobj;
506 struct ib_uverbs_event *entry;
507 unsigned long flags;
508
509 if (!file)
510 return;
511
512 spin_lock_irqsave(&file->lock, flags);
513 if (file->is_closed) {
514 spin_unlock_irqrestore(&file->lock, flags);
515 return;
516 }
517
518 entry = kmalloc(sizeof *entry, GFP_ATOMIC);
519 if (!entry) {
520 spin_unlock_irqrestore(&file->lock, flags);
521 return;
522 }
523
524 uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
525
526 entry->desc.comp.cq_handle = cq->uobject->user_handle;
527 entry->counter = &uobj->comp_events_reported;
528
529 list_add_tail(&entry->list, &file->event_list);
530 list_add_tail(&entry->obj_list, &uobj->comp_list);
531 spin_unlock_irqrestore(&file->lock, flags);
532
533 wake_up_interruptible(&file->poll_wait);
534 kill_fasync(&file->async_queue, SIGIO, POLL_IN);
535}
536
537static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
538 __u64 element, __u64 event,
539 struct list_head *obj_list,
540 u32 *counter)
541{
542 struct ib_uverbs_event *entry;
543 unsigned long flags;
544
545 spin_lock_irqsave(&file->async_file->lock, flags);
546 if (file->async_file->is_closed) {
547 spin_unlock_irqrestore(&file->async_file->lock, flags);
548 return;
549 }
550
551 entry = kmalloc(sizeof *entry, GFP_ATOMIC);
552 if (!entry) {
553 spin_unlock_irqrestore(&file->async_file->lock, flags);
554 return;
555 }
556
557 entry->desc.async.element = element;
558 entry->desc.async.event_type = event;
559 entry->desc.async.reserved = 0;
560 entry->counter = counter;
561
562 list_add_tail(&entry->list, &file->async_file->event_list);
563 if (obj_list)
564 list_add_tail(&entry->obj_list, obj_list);
565 spin_unlock_irqrestore(&file->async_file->lock, flags);
566
567 wake_up_interruptible(&file->async_file->poll_wait);
568 kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
569}
570
571void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
572{
573 struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
574 struct ib_ucq_object, uobject);
575
576 ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
577 event->event, &uobj->async_list,
578 &uobj->async_events_reported);
579}
580
581void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
582{
583 struct ib_uevent_object *uobj;
584
585 /* for XRC target qp's, check that qp is live */
586 if (!event->element.qp->uobject || !event->element.qp->uobject->live)
587 return;
588
589 uobj = container_of(event->element.qp->uobject,
590 struct ib_uevent_object, uobject);
591
592 ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
593 event->event, &uobj->event_list,
594 &uobj->events_reported);
595}
596
597void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
598{
599 struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
600 struct ib_uevent_object, uobject);
601
602 ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
603 event->event, &uobj->event_list,
604 &uobj->events_reported);
605}
606
607void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
608{
609 struct ib_uevent_object *uobj;
610
611 uobj = container_of(event->element.srq->uobject,
612 struct ib_uevent_object, uobject);
613
614 ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
615 event->event, &uobj->event_list,
616 &uobj->events_reported);
617}
618
619void ib_uverbs_event_handler(struct ib_event_handler *handler,
620 struct ib_event *event)
621{
622 struct ib_uverbs_file *file =
623 container_of(handler, struct ib_uverbs_file, event_handler);
624
625 ib_uverbs_async_handler(file, event->element.port_num, event->event,
626 NULL, NULL);
627}
628
629void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
630{
631 kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
632 file->async_file = NULL;
633}
634
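/*
 * Allocate an event file backed by an anonymous inode.  For the async
 * channel (is_async) this also registers the IB event handler so
 * device and port events are delivered to userspace.
 */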
635struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
636 struct ib_device *ib_dev,
637 int is_async)
638{
639 struct ib_uverbs_event_file *ev_file;
640 struct file *filp;
641 int ret;
642
643 ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
644 if (!ev_file)
645 return ERR_PTR(-ENOMEM);
646
647 kref_init(&ev_file->ref);
648 spin_lock_init(&ev_file->lock);
649 INIT_LIST_HEAD(&ev_file->event_list);
650 init_waitqueue_head(&ev_file->poll_wait);
651 ev_file->uverbs_file = uverbs_file;
652 kref_get(&ev_file->uverbs_file->ref);
653 ev_file->async_queue = NULL;
654 ev_file->is_closed = 0;
655
656 filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
657 ev_file, O_RDONLY);
658 if (IS_ERR(filp))
659 goto err_put_refs;
660
661 mutex_lock(&uverbs_file->device->lists_mutex);
662 list_add_tail(&ev_file->list,
663 &uverbs_file->device->uverbs_events_file_list);
664 mutex_unlock(&uverbs_file->device->lists_mutex);
665
666 if (is_async) {
667 WARN_ON(uverbs_file->async_file);
668 uverbs_file->async_file = ev_file;
669 kref_get(&uverbs_file->async_file->ref);
670 INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
671 ib_dev,
672 ib_uverbs_event_handler);
673 ret = ib_register_event_handler(&uverbs_file->event_handler);
674 if (ret)
675 goto err_put_file;
676
677 /* At this point the async event file is fully set up */
678 ev_file->is_async = 1;
679 }
680
681 return filp;
682
683err_put_file:
684 fput(filp);
685 kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file);
686 uverbs_file->async_file = NULL;
687 return ERR_PTR(ret);
688
689err_put_refs:
690 kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
691 kref_put(&ev_file->ref, ib_uverbs_release_event_file);
692 return filp;
693}
694
695/*
696 * Look up a completion event file by FD. If lookup is successful,
697 * takes a ref to the event file struct that it returns; if
698 * unsuccessful, returns NULL.
699 */
700struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
701{
702 struct ib_uverbs_event_file *ev_file = NULL;
703 struct fd f = fdget(fd);
704
705 if (!f.file)
706 return NULL;
707
708 if (f.file->f_op != &uverbs_event_fops)
709 goto out;
710
711 ev_file = f.file->private_data;
712 if (ev_file->is_async) {
713 ev_file = NULL;
714 goto out;
715 }
716
717 kref_get(&ev_file->ref);
718
719out:
720 fdput(f);
721 return ev_file;
722}
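/*
 * A minimal, hypothetical caller sketch (not taken from this file): a
 * command handler given a completion-channel FD by userspace could do
 *
 *	ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
 *	if (!ev_file)
 *		return -EINVAL;
 *	...use ev_file as the CQ's completion channel...
 *	kref_put(&ev_file->ref, ib_uverbs_release_event_file);
 *
 * dropping the reference taken by the lookup once it is done with it.
 */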
723
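/*
 * Commands up to IB_USER_VERBS_CMD_OPEN_QP are gated by the device's
 * uverbs_cmd_mask; extended commands by uverbs_ex_cmd_mask.
 */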
724static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
725{
726 u64 mask;
727
728 if (command <= IB_USER_VERBS_CMD_OPEN_QP)
729 mask = ib_dev->uverbs_cmd_mask;
730 else
731 mask = ib_dev->uverbs_ex_cmd_mask;
732
733 if (mask & ((u64)1 << command))
734 return 0;
735
736 return -1;
737}
738
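/*
 * Legacy command transport: each write() carries a struct
 * ib_uverbs_cmd_hdr, for extended commands also a struct
 * ib_uverbs_ex_cmd_hdr, followed by the command payload.  Sizes in the
 * headers are in 4-byte words (8-byte words for extended commands) and
 * must match the write() length exactly.
 */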
739static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
740 size_t count, loff_t *pos)
741{
742 struct ib_uverbs_file *file = filp->private_data;
743 struct ib_device *ib_dev;
744 struct ib_uverbs_cmd_hdr hdr;
745 __u32 command;
746 __u32 flags;
747 int srcu_key;
748 ssize_t ret;
749
750 if (!ib_safe_file_access(filp)) {
751 pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
752 task_tgid_vnr(current), current->comm);
753 return -EACCES;
754 }
755
756 if (count < sizeof hdr)
757 return -EINVAL;
758
759 if (copy_from_user(&hdr, buf, sizeof hdr))
760 return -EFAULT;
761
762 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
763 ib_dev = srcu_dereference(file->device->ib_dev,
764 &file->device->disassociate_srcu);
765 if (!ib_dev) {
766 ret = -EIO;
767 goto out;
768 }
769
770 if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
771 IB_USER_VERBS_CMD_COMMAND_MASK)) {
772 ret = -EINVAL;
773 goto out;
774 }
775
776 command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
777 if (verify_command_mask(ib_dev, command)) {
778 ret = -EOPNOTSUPP;
779 goto out;
780 }
781
782 if (!file->ucontext &&
783 command != IB_USER_VERBS_CMD_GET_CONTEXT) {
784 ret = -EINVAL;
785 goto out;
786 }
787
788 flags = (hdr.command &
789 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
790
791 if (!flags) {
792 if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
793 !uverbs_cmd_table[command]) {
794 ret = -EINVAL;
795 goto out;
796 }
797
798 if (hdr.in_words * 4 != count) {
799 ret = -EINVAL;
800 goto out;
801 }
802
803 ret = uverbs_cmd_table[command](file, ib_dev,
804 buf + sizeof(hdr),
805 hdr.in_words * 4,
806 hdr.out_words * 4);
807
808 } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
809 struct ib_uverbs_ex_cmd_hdr ex_hdr;
810 struct ib_udata ucore;
811 struct ib_udata uhw;
812 size_t written_count = count;
813
814 if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
815 !uverbs_ex_cmd_table[command]) {
816 ret = -ENOSYS;
817 goto out;
818 }
819
820 if (!file->ucontext) {
821 ret = -EINVAL;
822 goto out;
823 }
824
825 if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
826 ret = -EINVAL;
827 goto out;
828 }
829
830 if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
831 ret = -EFAULT;
832 goto out;
833 }
834
835 count -= sizeof(hdr) + sizeof(ex_hdr);
836 buf += sizeof(hdr) + sizeof(ex_hdr);
837
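/*
 * What remains must match exactly the core + provider input payload
 * declared by the headers, in 8-byte units.
 */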
838 if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
839 ret = -EINVAL;
840 goto out;
841 }
842
843 if (ex_hdr.cmd_hdr_reserved) {
844 ret = -EINVAL;
845 goto out;
846 }
847
848 if (ex_hdr.response) {
849 if (!hdr.out_words && !ex_hdr.provider_out_words) {
850 ret = -EINVAL;
851 goto out;
852 }
853
854 if (!access_ok(VERIFY_WRITE,
855 (void __user *) (unsigned long) ex_hdr.response,
856 (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
857 ret = -EFAULT;
858 goto out;
859 }
860 } else {
861 if (hdr.out_words || ex_hdr.provider_out_words) {
862 ret = -EINVAL;
863 goto out;
864 }
865 }
866
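/*
 * ucore describes the core request/response buffers, uhw the
 * provider-specific portion that follows them.
 */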
867 INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
868 hdr.in_words * 8, hdr.out_words * 8);
869
870 INIT_UDATA_BUF_OR_NULL(&uhw,
871 buf + ucore.inlen,
872 (unsigned long) ex_hdr.response + ucore.outlen,
873 ex_hdr.provider_in_words * 8,
874 ex_hdr.provider_out_words * 8);
875
876 ret = uverbs_ex_cmd_table[command](file,
877 ib_dev,
878 &ucore,
879 &uhw);
880 if (!ret)
881 ret = written_count;
882 } else {
883 ret = -ENOSYS;
884 }
885
886out:
887 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
888 return ret;
889}
890
891static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
892{
893 struct ib_uverbs_file *file = filp->private_data;
894 struct ib_device *ib_dev;
895 int ret = 0;
896 int srcu_key;
897
898 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
899 ib_dev = srcu_dereference(file->device->ib_dev,
900 &file->device->disassociate_srcu);
901 if (!ib_dev) {
902 ret = -EIO;
903 goto out;
904 }
905
906 if (!file->ucontext)
907 ret = -ENODEV;
908 else
909 ret = ib_dev->mmap(file->ucontext, vma);
910out:
911 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
912 return ret;
913}
914
915/*
916 * ib_uverbs_open() does not need the BKL:
917 *
918 * - the ib_uverbs_device structures are properly reference counted and
919 * everything else is purely local to the file being created, so
920 * races against other open calls are not a problem;
921 * - there is no ioctl method to race against;
922 * - the open method will either immediately fail with -ENXIO, or all
923 * required initialization will be done.
924 */
925static int ib_uverbs_open(struct inode *inode, struct file *filp)
926{
927 struct ib_uverbs_device *dev;
928 struct ib_uverbs_file *file;
929 struct ib_device *ib_dev;
930 int ret;
931 int module_dependent;
932 int srcu_key;
933
934 dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
935 if (!atomic_inc_not_zero(&dev->refcount))
936 return -ENXIO;
937
938 srcu_key = srcu_read_lock(&dev->disassociate_srcu);
939 mutex_lock(&dev->lists_mutex);
940 ib_dev = srcu_dereference(dev->ib_dev,
941 &dev->disassociate_srcu);
942 if (!ib_dev) {
943 ret = -EIO;
944 goto err;
945 }
946
947 /* If the IB device supports disassociating the ucontext, there is no hard
948 * module dependency between the uverbs device and its low-level device.
949 */
950 module_dependent = !(ib_dev->disassociate_ucontext);
951
952 if (module_dependent) {
953 if (!try_module_get(ib_dev->owner)) {
954 ret = -ENODEV;
955 goto err;
956 }
957 }
958
959 file = kzalloc(sizeof(*file), GFP_KERNEL);
960 if (!file) {
961 ret = -ENOMEM;
962 if (module_dependent)
963 goto err_module;
964
965 goto err;
966 }
967
968 file->device = dev;
969 file->ucontext = NULL;
970 file->async_file = NULL;
971 kref_init(&file->ref);
972 mutex_init(&file->mutex);
973 mutex_init(&file->cleanup_mutex);
974
975 filp->private_data = file;
976 kobject_get(&dev->kobj);
977 list_add_tail(&file->list, &dev->uverbs_file_list);
978 mutex_unlock(&dev->lists_mutex);
979 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
980
981 return nonseekable_open(inode, filp);
982
983err_module:
984 module_put(ib_dev->owner);
985
986err:
987 mutex_unlock(&dev->lists_mutex);
988 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
989 if (atomic_dec_and_test(&dev->refcount))
990 ib_uverbs_comp_dev(dev);
991
992 return ret;
993}
994
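/*
 * Release per-open state: destroy the ucontext (unless the device was
 * already disassociated), unlink the file from the device's list and
 * drop the references taken at open and event-file creation time.
 */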
995static int ib_uverbs_close(struct inode *inode, struct file *filp)
996{
997 struct ib_uverbs_file *file = filp->private_data;
998 struct ib_uverbs_device *dev = file->device;
999
1000 mutex_lock(&file->cleanup_mutex);
1001 if (file->ucontext) {
1002 ib_uverbs_cleanup_ucontext(file, file->ucontext);
1003 file->ucontext = NULL;
1004 }
1005 mutex_unlock(&file->cleanup_mutex);
1006
1007 mutex_lock(&file->device->lists_mutex);
1008 if (!file->is_closed) {
1009 list_del(&file->list);
1010 file->is_closed = 1;
1011 }
1012 mutex_unlock(&file->device->lists_mutex);
1013
1014 if (file->async_file)
1015 kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
1016
1017 kref_put(&file->ref, ib_uverbs_release_file);
1018 kobject_put(&dev->kobj);
1019
1020 return 0;
1021}
1022
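/*
 * Two variants of the char-device fops: devices whose driver provides
 * an mmap handler get uverbs_mmap_fops, all others get uverbs_fops
 * (selected in ib_uverbs_add_one()).
 */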
1023static const struct file_operations uverbs_fops = {
1024 .owner = THIS_MODULE,
1025 .write = ib_uverbs_write,
1026 .open = ib_uverbs_open,
1027 .release = ib_uverbs_close,
1028 .llseek = no_llseek,
1029};
1030
1031static const struct file_operations uverbs_mmap_fops = {
1032 .owner = THIS_MODULE,
1033 .write = ib_uverbs_write,
1034 .mmap = ib_uverbs_mmap,
1035 .open = ib_uverbs_open,
1036 .release = ib_uverbs_close,
1037 .llseek = no_llseek,
1038};
1039
1040static struct ib_client uverbs_client = {
1041 .name = "uverbs",
1042 .add = ib_uverbs_add_one,
1043 .remove = ib_uverbs_remove_one
1044};
1045
1046static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
1047 char *buf)
1048{
1049 int ret = -ENODEV;
1050 int srcu_key;
1051 struct ib_uverbs_device *dev = dev_get_drvdata(device);
1052 struct ib_device *ib_dev;
1053
1054 if (!dev)
1055 return -ENODEV;
1056
1057 srcu_key = srcu_read_lock(&dev->disassociate_srcu);
1058 ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
1059 if (ib_dev)
1060 ret = sprintf(buf, "%s\n", ib_dev->name);
1061 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1062
1063 return ret;
1064}
1065static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1066
1067static ssize_t show_dev_abi_version(struct device *device,
1068 struct device_attribute *attr, char *buf)
1069{
1070 struct ib_uverbs_device *dev = dev_get_drvdata(device);
1071 int ret = -ENODEV;
1072 int srcu_key;
1073 struct ib_device *ib_dev;
1074
1075 if (!dev)
1076 return -ENODEV;
1077 srcu_key = srcu_read_lock(&dev->disassociate_srcu);
1078 ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
1079 if (ib_dev)
1080 ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
1081 srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1082
1083 return ret;
1084}
1085static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
1086
1087static CLASS_ATTR_STRING(abi_version, S_IRUGO,
1088 __stringify(IB_USER_VERBS_ABI_VERSION));
1089
1090static dev_t overflow_maj;
1091static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
1092
1093/*
1094 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
1095 * requesting a new major number and doubling the number of max devices we
1096 * support. It's stupid, but simple.
1097 */
1098static int find_overflow_devnum(void)
1099{
1100 int ret;
1101
1102 if (!overflow_maj) {
1103 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
1104 "infiniband_verbs");
1105 if (ret) {
1106 pr_err("user_verbs: couldn't register dynamic device number\n");
1107 return ret;
1108 }
1109 }
1110
1111 ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
1112 if (ret >= IB_UVERBS_MAX_DEVICES)
1113 return -1;
1114
1115 return ret;
1116}
1117
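/*
 * IB client "add" callback: allocate the uverbs device, reserve a
 * minor number, create the char device and its sysfs attributes, then
 * publish the result as client data.
 */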
1118 static int ib_uverbs_add_one(struct ib_device *device)
1119{
1120 int devnum;
1121 dev_t base;
1122 struct ib_uverbs_device *uverbs_dev;
1123 int ret;
1124
1125 if (!device->alloc_ucontext)
1126 return -EOPNOTSUPP;
1127
1128 uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
1129 if (!uverbs_dev)
1130 return -ENOMEM;
1131
1132 ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
1133 if (ret) {
1134 kfree(uverbs_dev);
1135 return ret;
1136 }
1137
1138 atomic_set(&uverbs_dev->refcount, 1);
1139 init_completion(&uverbs_dev->comp);
1140 uverbs_dev->xrcd_tree = RB_ROOT;
1141 mutex_init(&uverbs_dev->xrcd_tree_mutex);
1142 kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
1143 mutex_init(&uverbs_dev->lists_mutex);
1144 INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
1145 INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
1146
1147 spin_lock(&map_lock);
1148 devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
1149 if (devnum >= IB_UVERBS_MAX_DEVICES) {
1150 spin_unlock(&map_lock);
1151 devnum = find_overflow_devnum();
1152 if (devnum < 0) {
ret = -ENOMEM;
1153 goto err;
}
1154
1155 spin_lock(&map_lock);
1156 uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
1157 base = devnum + overflow_maj;
1158 set_bit(devnum, overflow_map);
1159 } else {
1160 uverbs_dev->devnum = devnum;
1161 base = devnum + IB_UVERBS_BASE_DEV;
1162 set_bit(devnum, dev_map);
1163 }
1164 spin_unlock(&map_lock);
1165
1166 rcu_assign_pointer(uverbs_dev->ib_dev, device);
1167 uverbs_dev->num_comp_vectors = device->num_comp_vectors;
1168
1169 cdev_init(&uverbs_dev->cdev, NULL);
1170 uverbs_dev->cdev.owner = THIS_MODULE;
1171 uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
1172 uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
1173 kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
1174 ret = cdev_add(&uverbs_dev->cdev, base, 1);
if (ret)
1175 goto err_cdev;
1176
1177 uverbs_dev->dev = device_create(&uverbs_class, device->dma_device,
1178 uverbs_dev->cdev.dev, uverbs_dev,
1179 "uverbs%d", uverbs_dev->devnum);
1180 if (IS_ERR(uverbs_dev->dev)) {
ret = PTR_ERR(uverbs_dev->dev);
1181 goto err_cdev;
}
1182
1183 ret = device_create_file(uverbs_dev->dev, &dev_attr_ibdev);
if (ret)
1184 goto err_class;
1185 ret = device_create_file(uverbs_dev->dev, &dev_attr_abi_version);
if (ret)
1186 goto err_class;
1187
1188 ib_set_client_data(device, &uverbs_client, uverbs_dev);
1189
1190 return 0;
1191
1192err_class:
1193 device_destroy(&uverbs_class, uverbs_dev->cdev.dev);
1194
1195err_cdev:
1196 cdev_del(&uverbs_dev->cdev);
1197 if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
1198 clear_bit(devnum, dev_map);
1199 else
1200 clear_bit(devnum, overflow_map);
1201
1202err:
1203 if (atomic_dec_and_test(&uverbs_dev->refcount))
1204 ib_uverbs_comp_dev(uverbs_dev);
1205 wait_for_completion(&uverbs_dev->comp);
1206 kobject_put(&uverbs_dev->kobj);
1207 return ret;
1208}
1209
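/*
 * Forcibly detach every open file from the hardware: report
 * IB_EVENT_DEVICE_FATAL, disassociate and clean up each ucontext, and
 * wake all event readers so they notice the device going away.
 */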
1210static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
1211 struct ib_device *ib_dev)
1212{
1213 struct ib_uverbs_file *file;
1214 struct ib_uverbs_event_file *event_file;
1215 struct ib_event event;
1216
1217 /* Wait for any pending or running commands to terminate */
1218 synchronize_srcu(&uverbs_dev->disassociate_srcu);
1219 event.event = IB_EVENT_DEVICE_FATAL;
1220 event.element.port_num = 0;
1221 event.device = ib_dev;
1222
1223 mutex_lock(&uverbs_dev->lists_mutex);
1224 while (!list_empty(&uverbs_dev->uverbs_file_list)) {
1225 struct ib_ucontext *ucontext;
1226 file = list_first_entry(&uverbs_dev->uverbs_file_list,
1227 struct ib_uverbs_file, list);
1228 file->is_closed = 1;
1229 list_del(&file->list);
1230 kref_get(&file->ref);
1231 mutex_unlock(&uverbs_dev->lists_mutex);
1232
1233 ib_uverbs_event_handler(&file->event_handler, &event);
1234
1235 mutex_lock(&file->cleanup_mutex);
1236 ucontext = file->ucontext;
1237 file->ucontext = NULL;
1238 mutex_unlock(&file->cleanup_mutex);
1239
1240 /* At this point ib_uverbs_close cannot be running
1241 * ib_uverbs_cleanup_ucontext
1242 */
1243 if (ucontext) {
1244 /* We must release the mutex before going ahead and
1245 * calling disassociate_ucontext. disassociate_ucontext
1246 * might end up indirectly calling uverbs_close,
1247 * for example due to freeing the resources
1248 * (e.g. mmput).
1249 */
1250 ib_dev->disassociate_ucontext(ucontext);
1251 ib_uverbs_cleanup_ucontext(file, ucontext);
1252 }
1253
1254 mutex_lock(&uverbs_dev->lists_mutex);
1255 kref_put(&file->ref, ib_uverbs_release_file);
1256 }
1257
1258 while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
1259 event_file = list_first_entry(&uverbs_dev->
1260 uverbs_events_file_list,
1261 struct ib_uverbs_event_file,
1262 list);
1263 spin_lock_irq(&event_file->lock);
1264 event_file->is_closed = 1;
1265 spin_unlock_irq(&event_file->lock);
1266
1267 list_del(&event_file->list);
1268 if (event_file->is_async) {
1269 ib_unregister_event_handler(&event_file->uverbs_file->
1270 event_handler);
1271 event_file->uverbs_file->event_handler.device = NULL;
1272 }
1273
1274 wake_up_interruptible(&event_file->poll_wait);
1275 kill_fasync(&event_file->async_queue, SIGIO, POLL_IN);
1276 }
1277 mutex_unlock(&uverbs_dev->lists_mutex);
1278}
1279
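/*
 * IB client "remove" callback: tear down the char device and sysfs
 * entries; if the driver supports disassociation we detach clients
 * immediately, otherwise we wait for them to close their files.
 */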
1280static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
1281{
1282 struct ib_uverbs_device *uverbs_dev = client_data;
1283 int wait_clients = 1;
1284
1285 if (!uverbs_dev)
1286 return;
1287
1288 dev_set_drvdata(uverbs_dev->dev, NULL);
1289 device_destroy(&uverbs_class, uverbs_dev->cdev.dev);
1290 cdev_del(&uverbs_dev->cdev);
1291
1292 if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
1293 clear_bit(uverbs_dev->devnum, dev_map);
1294 else
1295 clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
1296
1297 if (device->disassociate_ucontext) {
1298 /* We disassociate HW resources and immediately return.
1299 * Userspace will see an EIO errno for all future access.
1300 * Upon returning, the ib_device may be freed internally and is no
1301 * longer valid.
1302 * uverbs_device is still available until all clients close
1303 * their files, then the uverbs device ref count will be zero
1304 * and its resources will be freed.
1305 * Note: At this point no more files can be opened since the
1306 * cdev was deleted; however, active clients can still issue
1307 * commands and close their open files.
1308 */
1309 rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
1310 ib_uverbs_free_hw_resources(uverbs_dev, device);
1311 wait_clients = 0;
1312 }
1313
1314 if (atomic_dec_and_test(&uverbs_dev->refcount))
1315 ib_uverbs_comp_dev(uverbs_dev);
1316 if (wait_clients)
1317 wait_for_completion(&uverbs_dev->comp);
1318 kobject_put(&uverbs_dev->kobj);
1319}
1320
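/*
 * Module init: reserve the char-device region, register the class and
 * its abi_version attribute, then register as an IB client.
 */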
1328static int __init ib_uverbs_init(void)
1329{
1330 int ret;
1331
1332 ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
1333 "infiniband_verbs");
1334 if (ret) {
1335 pr_err("user_verbs: couldn't register device number\n");
1336 goto out;
1337 }
1338
1339 ret = class_register(&uverbs_class);
1340 if (ret) {
1341 pr_err("user_verbs: couldn't create class infiniband_verbs\n");
1342 goto out_chrdev;
1343 }
1347
1348 ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
1349 if (ret) {
1350 pr_err("user_verbs: couldn't create abi_version attribute\n");
1351 goto out_class;
1352 }
1353
1354 ret = ib_register_client(&uverbs_client);
1355 if (ret) {
1356 pr_err("user_verbs: couldn't register client\n");
1357 goto out_class;
1358 }
1359
1360 return 0;
1361
1362out_class:
1363 class_unregister(&uverbs_class);
1364
1365out_chrdev:
1366 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
1367
1368out:
1369 return ret;
1370}
1371
1372static void __exit ib_uverbs_cleanup(void)
1373{
1374 ib_unregister_client(&uverbs_client);
1375 class_unregister(&uverbs_class);
1376 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
1377 if (overflow_maj)
1378 unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
1379 idr_destroy(&ib_uverbs_pd_idr);
1380 idr_destroy(&ib_uverbs_mr_idr);
1381 idr_destroy(&ib_uverbs_mw_idr);
1382 idr_destroy(&ib_uverbs_ah_idr);
1383 idr_destroy(&ib_uverbs_cq_idr);
1384 idr_destroy(&ib_uverbs_qp_idr);
1385 idr_destroy(&ib_uverbs_srq_idr);
1386}
1387
1388module_init(ib_uverbs_init);
1389module_exit(ib_uverbs_cleanup);