1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <drm/drmP.h>
26#include <drm/i915_drm.h>
27#include "i915_drv.h"
28#include "i915_trace.h"
29#include "intel_drv.h"
30#include <linux/mmu_context.h>
31#include <linux/mmu_notifier.h>
32#include <linux/mempolicy.h>
33#include <linux/swap.h>
34
35struct i915_mm_struct {
36 struct mm_struct *mm;
37 struct drm_device *dev;
38 struct i915_mmu_notifier *mn;
39 struct hlist_node node;
40 struct kref kref;
41 struct work_struct work;
42};
43
44#if defined(CONFIG_MMU_NOTIFIER)
45#include <linux/interval_tree.h>
46
47struct i915_mmu_notifier {
48 spinlock_t lock;
49 struct hlist_node node;
50 struct mmu_notifier mn;
51 struct rb_root objects;
52};
53
54struct i915_mmu_object {
55 struct i915_mmu_notifier *mn;
56 struct drm_i915_gem_object *obj;
57 struct interval_tree_node it;
58 struct list_head link;
59 struct work_struct work;
60 bool attached;
61};
62
63static void cancel_userptr(struct work_struct *work)
64{
65 struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
66 struct drm_i915_gem_object *obj = mo->obj;
67 struct drm_device *dev = obj->base.dev;
68
69 mutex_lock(&dev->struct_mutex);
70 /* Cancel any active worker and force us to re-evaluate gup */
71 obj->userptr.work = NULL;
72
73 if (obj->pages != NULL) {
74 struct drm_i915_private *dev_priv = to_i915(dev);
75 struct i915_vma *vma, *tmp;
76 bool was_interruptible;
77
78 was_interruptible = dev_priv->mm.interruptible;
79 dev_priv->mm.interruptible = false;
80
81 list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
82 int ret = i915_vma_unbind(vma);
83 WARN_ON(ret && ret != -EIO);
84 }
85 WARN_ON(i915_gem_object_put_pages(obj));
86
87 dev_priv->mm.interruptible = was_interruptible;
88 }
89
90 drm_gem_object_unreference(&obj->base);
91 mutex_unlock(&dev->struct_mutex);
92}
93
94static void add_object(struct i915_mmu_object *mo)
95{
96 if (mo->attached)
97 return;
98
99 interval_tree_insert(&mo->it, &mo->mn->objects);
100 mo->attached = true;
101}
102
103static void del_object(struct i915_mmu_object *mo)
104{
105 if (!mo->attached)
106 return;
107
108 interval_tree_remove(&mo->it, &mo->mn->objects);
109 mo->attached = false;
110}
111
112static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
113 struct mm_struct *mm,
114 unsigned long start,
115 unsigned long end)
116{
117 struct i915_mmu_notifier *mn =
118 container_of(_mn, struct i915_mmu_notifier, mn);
119 struct i915_mmu_object *mo;
120 struct interval_tree_node *it;
121 LIST_HEAD(cancelled);
122
123 if (RB_EMPTY_ROOT(&mn->objects))
124 return;
125
126 /* interval ranges are inclusive, but invalidate range is exclusive */
127 end--;
128
129 spin_lock(&mn->lock);
130 it = interval_tree_iter_first(&mn->objects, start, end);
131 while (it) {
132 /* The mmu_object is released late when destroying the
133 * GEM object so it is entirely possible to gain a
134 * reference on an object in the process of being freed
135 * since our serialisation is via the spinlock and not
136 * the struct_mutex - and consequently use it after it
137 * is freed and then double free it. To prevent that
138 * use-after-free we only acquire a reference on the
139 * object if it is not in the process of being destroyed.
140 */
141 mo = container_of(it, struct i915_mmu_object, it);
142 if (kref_get_unless_zero(&mo->obj->base.refcount))
143 schedule_work(&mo->work);
144
145 list_add(&mo->link, &cancelled);
146 it = interval_tree_iter_next(it, start, end);
147 }
148 list_for_each_entry(mo, &cancelled, link)
149 del_object(mo);
150 spin_unlock(&mn->lock);
151}
152
153static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
154 .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
155};
156
157static struct i915_mmu_notifier *
158i915_mmu_notifier_create(struct mm_struct *mm)
159{
160 struct i915_mmu_notifier *mn;
161 int ret;
162
163 mn = kmalloc(sizeof(*mn), GFP_KERNEL);
164 if (mn == NULL)
165 return ERR_PTR(-ENOMEM);
166
167 spin_lock_init(&mn->lock);
168 mn->mn.ops = &i915_gem_userptr_notifier;
169 mn->objects = RB_ROOT;
170
171 /* Protected by mmap_sem (write-lock) */
172 ret = __mmu_notifier_register(&mn->mn, mm);
173 if (ret) {
174 kfree(mn);
175 return ERR_PTR(ret);
176 }
177
178 return mn;
179}
180
181static void
182i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
183{
184 struct i915_mmu_object *mo;
185
186 mo = obj->userptr.mmu_object;
187 if (mo == NULL)
188 return;
189
190 spin_lock(&mo->mn->lock);
191 del_object(mo);
192 spin_unlock(&mo->mn->lock);
193 kfree(mo);
194
195 obj->userptr.mmu_object = NULL;
196}
197
198static struct i915_mmu_notifier *
199i915_mmu_notifier_find(struct i915_mm_struct *mm)
200{
201 struct i915_mmu_notifier *mn = mm->mn;
202
203 mn = mm->mn;
204 if (mn)
205 return mn;
206
207 down_write(&mm->mm->mmap_sem);
208 mutex_lock(&to_i915(mm->dev)->mm_lock);
209 if ((mn = mm->mn) == NULL) {
210 mn = i915_mmu_notifier_create(mm->mm);
211 if (!IS_ERR(mn))
212 mm->mn = mn;
213 }
214 mutex_unlock(&to_i915(mm->dev)->mm_lock);
215 up_write(&mm->mm->mmap_sem);
216
217 return mn;
218}
219
220static int
221i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
222 unsigned flags)
223{
224 struct i915_mmu_notifier *mn;
225 struct i915_mmu_object *mo;
226
227 if (flags & I915_USERPTR_UNSYNCHRONIZED)
228 return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
229
230 if (WARN_ON(obj->userptr.mm == NULL))
231 return -EINVAL;
232
233 mn = i915_mmu_notifier_find(obj->userptr.mm);
234 if (IS_ERR(mn))
235 return PTR_ERR(mn);
236
237 mo = kzalloc(sizeof(*mo), GFP_KERNEL);
238 if (mo == NULL)
239 return -ENOMEM;
240
241 mo->mn = mn;
242 mo->obj = obj;
243 mo->it.start = obj->userptr.ptr;
244 mo->it.last = obj->userptr.ptr + obj->base.size - 1;
245 INIT_WORK(&mo->work, cancel_userptr);
246
247 obj->userptr.mmu_object = mo;
248 return 0;
249}
250
251static void
252i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
253 struct mm_struct *mm)
254{
255 if (mn == NULL)
256 return;
257
258 mmu_notifier_unregister(&mn->mn, mm);
259 kfree(mn);
260}
261
262#else
263
264static void
265i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
266{
267}
268
269static int
270i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
271 unsigned flags)
272{
273 if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
274 return -ENODEV;
275
276 if (!capable(CAP_SYS_ADMIN))
277 return -EPERM;
278
279 return 0;
280}
281
282static void
283i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
284 struct mm_struct *mm)
285{
286}
287
288#endif
289
290static struct i915_mm_struct *
291__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
292{
293 struct i915_mm_struct *mm;
294
295 /* Protected by dev_priv->mm_lock */
296 hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
297 if (mm->mm == real)
298 return mm;
299
300 return NULL;
301}
302
303static int
304i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
305{
306 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
307 struct i915_mm_struct *mm;
308 int ret = 0;
309
310 /* During release of the GEM object we hold the struct_mutex. This
311 * precludes us from calling mmput() at that time as that may be
312 * the last reference and so call exit_mmap(). exit_mmap() will
313 * attempt to reap the vma, and if we were holding a GTT mmap
314 * would then call drm_gem_vm_close() and attempt to reacquire
315 * the struct mutex. So in order to avoid that recursion, we have
316 * to defer releasing the mm reference until after we drop the
317 * struct_mutex, i.e. we need to schedule a worker to do the clean
318 * up.
319 */
320 mutex_lock(&dev_priv->mm_lock);
321 mm = __i915_mm_struct_find(dev_priv, current->mm);
322 if (mm == NULL) {
323 mm = kmalloc(sizeof(*mm), GFP_KERNEL);
324 if (mm == NULL) {
325 ret = -ENOMEM;
326 goto out;
327 }
328
329 kref_init(&mm->kref);
330 mm->dev = obj->base.dev;
331
332 mm->mm = current->mm;
333 atomic_inc(&current->mm->mm_count);
334
335 mm->mn = NULL;
336
337 /* Protected by dev_priv->mm_lock */
338 hash_add(dev_priv->mm_structs,
339 &mm->node, (unsigned long)mm->mm);
340 } else
341 kref_get(&mm->kref);
342
343 obj->userptr.mm = mm;
344out:
345 mutex_unlock(&dev_priv->mm_lock);
346 return ret;
347}
348
349static void
350__i915_mm_struct_free__worker(struct work_struct *work)
351{
352 struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
353 i915_mmu_notifier_free(mm->mn, mm->mm);
354 mmdrop(mm->mm);
355 kfree(mm);
356}
357
358static void
359__i915_mm_struct_free(struct kref *kref)
360{
361 struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
362
363 /* Protected by dev_priv->mm_lock */
364 hash_del(&mm->node);
365 mutex_unlock(&to_i915(mm->dev)->mm_lock);
366
367 INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
368 schedule_work(&mm->work);
369}
370
371static void
372i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
373{
374 if (obj->userptr.mm == NULL)
375 return;
376
377 kref_put_mutex(&obj->userptr.mm->kref,
378 __i915_mm_struct_free,
379 &to_i915(obj->base.dev)->mm_lock);
380 obj->userptr.mm = NULL;
381}
382
383struct get_pages_work {
384 struct work_struct work;
385 struct drm_i915_gem_object *obj;
386 struct task_struct *task;
387};
388
389#if IS_ENABLED(CONFIG_SWIOTLB)
390#define swiotlb_active() swiotlb_nr_tbl()
391#else
392#define swiotlb_active() 0
393#endif
394
395static int
396st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
397{
398 struct scatterlist *sg;
399 int ret, n;
400
401 *st = kmalloc(sizeof(**st), GFP_KERNEL);
402 if (*st == NULL)
403 return -ENOMEM;
404
405 if (swiotlb_active()) {
406 ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
407 if (ret)
408 goto err;
409
410 for_each_sg((*st)->sgl, sg, num_pages, n)
411 sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
412 } else {
413 ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
414 0, num_pages << PAGE_SHIFT,
415 GFP_KERNEL);
416 if (ret)
417 goto err;
418 }
419
420 return 0;
421
422err:
423 kfree(*st);
424 *st = NULL;
425 return ret;
426}
427
428static int
429__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
430 struct page **pvec, int num_pages)
431{
432 int ret;
433
434 ret = st_set_pages(&obj->pages, pvec, num_pages);
435 if (ret)
436 return ret;
437
438 ret = i915_gem_gtt_prepare_object(obj);
439 if (ret) {
440 sg_free_table(obj->pages);
441 kfree(obj->pages);
442 obj->pages = NULL;
443 }
444
445 return ret;
446}
447
448static int
449__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
450 bool value)
451{
452 int ret = 0;
453
454 /* During mm_invalidate_range we need to cancel any userptr that
455 * overlaps the range being invalidated. Doing so requires the
456 * struct_mutex, and that risks recursion. In order to cause
457 * recursion, the user must alias the userptr address space with
458 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
459 * to invalidate that mmapping, mm_invalidate_range is called with
460 * the userptr address *and* the struct_mutex held. To prevent that
461 * we set a flag under the i915_mmu_notifier spinlock to indicate
462 * whether this object is valid.
463 */
464#if defined(CONFIG_MMU_NOTIFIER)
465 if (obj->userptr.mmu_object == NULL)
466 return 0;
467
468 spin_lock(&obj->userptr.mmu_object->mn->lock);
469 /* In order to serialise get_pages with an outstanding
470 * cancel_userptr, we must drop the struct_mutex and try again.
471 */
472 if (!value)
473 del_object(obj->userptr.mmu_object);
474 else if (!work_pending(&obj->userptr.mmu_object->work))
475 add_object(obj->userptr.mmu_object);
476 else
477 ret = -EAGAIN;
478 spin_unlock(&obj->userptr.mmu_object->mn->lock);
479#endif
480
481 return ret;
482}
483
484static void
485__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
486{
487 struct get_pages_work *work = container_of(_work, typeof(*work), work);
488 struct drm_i915_gem_object *obj = work->obj;
489 struct drm_device *dev = obj->base.dev;
490 const int npages = obj->base.size >> PAGE_SHIFT;
491 struct page **pvec;
492 int pinned, ret;
493
494 ret = -ENOMEM;
495 pinned = 0;
496
497 pvec = kmalloc(npages*sizeof(struct page *),
498 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
499 if (pvec == NULL)
500 pvec = drm_malloc_ab(npages, sizeof(struct page *));
501 if (pvec != NULL) {
502 struct mm_struct *mm = obj->userptr.mm->mm;
503
504 ret = -EFAULT;
505 if (atomic_inc_not_zero(&mm->mm_users)) {
506 down_read(&mm->mmap_sem);
507 while (pinned < npages) {
508 ret = get_user_pages_remote
509 (work->task, mm,
510 obj->userptr.ptr + pinned * PAGE_SIZE,
511 npages - pinned,
512 !obj->userptr.read_only, 0,
513 pvec + pinned, NULL);
514 if (ret < 0)
515 break;
516
517 pinned += ret;
518 }
519 up_read(&mm->mmap_sem);
520 mmput(mm);
521 }
522 }
523
524 mutex_lock(&dev->struct_mutex);
525 if (obj->userptr.work == &work->work) {
526 if (pinned == npages) {
527 ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
528 if (ret == 0) {
529 list_add_tail(&obj->global_list,
530 &to_i915(dev)->mm.unbound_list);
531 obj->get_page.sg = obj->pages->sgl;
532 obj->get_page.last = 0;
533 pinned = 0;
534 }
535 }
536 obj->userptr.work = ERR_PTR(ret);
537 if (ret)
538 __i915_gem_userptr_set_active(obj, false);
539 }
540
541 obj->userptr.workers--;
542 drm_gem_object_unreference(&obj->base);
543 mutex_unlock(&dev->struct_mutex);
544
545 release_pages(pvec, pinned, 0);
546 drm_free_large(pvec);
547
548 put_task_struct(work->task);
549 kfree(work);
550}
551
552static int
553__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
554 bool *active)
555{
556 struct get_pages_work *work;
557
558 /* Spawn a worker so that we can acquire the
559 * user pages without holding our mutex. Access
560 * to the user pages requires mmap_sem, and we have
561 * a strict lock ordering of mmap_sem, struct_mutex -
562 * we already hold struct_mutex here and so cannot
563 * call gup without encountering a lock inversion.
564 *
565 * Userspace will keep on repeating the operation
566 * (thanks to EAGAIN) until either we hit the fast
567 * path or the worker completes. If the worker is
568 * cancelled or superseded, the task is still run
569 * but the results ignored. (This leads to
570 * complications that we may have a stray object
571 * refcount that we need to be wary of when
572 * checking for existing objects during creation.)
573 * If the worker encounters an error, it reports
574 * that error back to this function through
575 * obj->userptr.work = ERR_PTR.
576 */
577 if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
578 return -EAGAIN;
579
580 work = kmalloc(sizeof(*work), GFP_KERNEL);
581 if (work == NULL)
582 return -ENOMEM;
583
584 obj->userptr.work = &work->work;
585 obj->userptr.workers++;
586
587 work->obj = obj;
588 drm_gem_object_reference(&obj->base);
589
590 work->task = current;
591 get_task_struct(work->task);
592
593 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
594 schedule_work(&work->work);
595
596 *active = true;
597 return -EAGAIN;
598}
599
600static int
601i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
602{
603 const int num_pages = obj->base.size >> PAGE_SHIFT;
604 struct page **pvec;
605 int pinned, ret;
606 bool active;
607
608 /* If userspace should engineer that these pages are replaced in
609 * the vma between us binding this page into the GTT and completion
610 * of rendering... Their loss. If they change the mapping of their
611 * pages they need to create a new bo to point to the new vma.
612 *
613 * However, that still leaves open the possibility of the vma
614 * being copied upon fork. Which falls under the same userspace
615 * synchronisation issue as a regular bo, except that this time
616 * the process may not be expecting that a particular piece of
617 * memory is tied to the GPU.
618 *
619 * Fortunately, we can hook into the mmu_notifier in order to
620 * discard the page references prior to anything nasty happening
621 * to the vma (discard or cloning) which should prevent the more
622 * egregious cases from causing harm.
623 */
624 if (IS_ERR(obj->userptr.work)) {
625 /* active flag will have been dropped already by the worker */
626 ret = PTR_ERR(obj->userptr.work);
627 obj->userptr.work = NULL;
628 return ret;
629 }
630 if (obj->userptr.work)
631 /* active flag should still be held for the pending work */
632 return -EAGAIN;
633
634 /* Let the mmu-notifier know that we have begun and need cancellation */
635 ret = __i915_gem_userptr_set_active(obj, true);
636 if (ret)
637 return ret;
638
639 pvec = NULL;
640 pinned = 0;
641 if (obj->userptr.mm->mm == current->mm) {
642 pvec = kmalloc(num_pages*sizeof(struct page *),
643 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
644 if (pvec == NULL) {
645 pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
646 if (pvec == NULL) {
647 __i915_gem_userptr_set_active(obj, false);
648 return -ENOMEM;
649 }
650 }
651
652 pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
653 !obj->userptr.read_only, pvec);
654 }
655
656 active = false;
657 if (pinned < 0)
658 ret = pinned, pinned = 0;
659 else if (pinned < num_pages)
660 ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
661 else
662 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
663 if (ret) {
664 __i915_gem_userptr_set_active(obj, active);
665 release_pages(pvec, pinned, 0);
666 }
667 drm_free_large(pvec);
668 return ret;
669}
670
671static void
672i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
673{
674 struct sg_page_iter sg_iter;
675
676 BUG_ON(obj->userptr.work != NULL);
677 __i915_gem_userptr_set_active(obj, false);
678
679 if (obj->madv != I915_MADV_WILLNEED)
680 obj->dirty = 0;
681
682 i915_gem_gtt_finish_object(obj);
683
684 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
685 struct page *page = sg_page_iter_page(&sg_iter);
686
687 if (obj->dirty)
688 set_page_dirty(page);
689
690 mark_page_accessed(page);
691 put_page(page);
692 }
693 obj->dirty = 0;
694
695 sg_free_table(obj->pages);
696 kfree(obj->pages);
697}
698
699static void
700i915_gem_userptr_release(struct drm_i915_gem_object *obj)
701{
702 i915_gem_userptr_release__mmu_notifier(obj);
703 i915_gem_userptr_release__mm_struct(obj);
704}
705
706static int
707i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
708{
709 if (obj->userptr.mmu_object)
710 return 0;
711
712 return i915_gem_userptr_init__mmu_notifier(obj, 0);
713}
714
715static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
716 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
717 .get_pages = i915_gem_userptr_get_pages,
718 .put_pages = i915_gem_userptr_put_pages,
719 .dmabuf_export = i915_gem_userptr_dmabuf_export,
720 .release = i915_gem_userptr_release,
721};
722
723/**
724 * Creates a new mm object that wraps some normal memory from the process
725 * context - user memory.
726 *
727 * We impose several restrictions upon the memory being mapped
728 * into the GPU.
729 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
730 * 2. It must be normal system memory, not a pointer into another map of IO
731 * space (e.g. it must not be a GTT mmapping of another object).
732 * 3. We only allow a bo as large as we could in theory map into the GTT,
733 * that is we limit the size to the total size of the GTT.
734 * 4. The bo is marked as being snoopable. The backing pages are left
735 * accessible directly by the CPU, but reads and writes by the GPU may
736 * incur the cost of a snoop (unless you have an LLC architecture).
737 *
738 * Synchronisation between multiple users and the GPU is left to userspace
739 * through the normal set-domain-ioctl. The kernel will enforce that the
740 * GPU relinquishes the VMA before it is returned back to the system
741 * i.e. upon free(), munmap() or process termination. However, the userspace
742 * malloc() library may not immediately relinquish the VMA after free() and
743 * instead reuse it whilst the GPU is still reading and writing to the VMA.
744 * Caveat emptor.
745 *
746 * Also note that the object created here is not currently a "first class"
747 * object, in that several ioctls are banned. These are the CPU access
748 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
749 * direct access via your pointer rather than use those ioctls. Another
750 * restriction is that we do not allow userptr surfaces to be pinned to the
751 * hardware and so we reject any attempt to create a framebuffer out of a
752 * userptr.
753 *
754 * If you think this is a good interface to use to pass GPU memory between
755 * drivers, please use dma-buf instead. In fact, wherever possible use
756 * dma-buf instead.
757 */
758int
759i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
760{
761 struct drm_i915_gem_userptr *args = data;
762 struct drm_i915_gem_object *obj;
763 int ret;
764 u32 handle;
765
766 if (args->flags & ~(I915_USERPTR_READ_ONLY |
767 I915_USERPTR_UNSYNCHRONIZED))
768 return -EINVAL;
769
770 if (offset_in_page(args->user_ptr | args->user_size))
771 return -EINVAL;
772
773 if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
774 (char __user *)(unsigned long)args->user_ptr, args->user_size))
775 return -EFAULT;
776
777 if (args->flags & I915_USERPTR_READ_ONLY) {
778 /* On almost all of the current hw, we cannot tell the GPU that a
779 * page is readonly, so this is just a placeholder in the uAPI.
780 */
781 return -ENODEV;
782 }
783
784 obj = i915_gem_object_alloc(dev);
785 if (obj == NULL)
786 return -ENOMEM;
787
788 drm_gem_private_object_init(dev, &obj->base, args->user_size);
789 i915_gem_object_init(obj, &i915_gem_userptr_ops);
790 obj->cache_level = I915_CACHE_LLC;
791 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
792 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
793
794 obj->userptr.ptr = args->user_ptr;
795 obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
796
797 /* And keep a pointer to the current->mm for resolving the user pages
798 * at binding. This means that we need to hook into the mmu_notifier
799 * in order to detect if the mmu is destroyed.
800 */
801 ret = i915_gem_userptr_init__mm_struct(obj);
802 if (ret == 0)
803 ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
804 if (ret == 0)
805 ret = drm_gem_handle_create(file, &obj->base, &handle);
806
807 /* drop reference from allocate - handle holds it now */
808 drm_gem_object_unreference_unlocked(&obj->base);
809 if (ret)
810 return ret;
811
812 args->handle = handle;
813 return 0;
814}
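/*
 * Illustrative usage, not part of the driver: a minimal userspace sketch of
 * the ioctl documented above, assuming libdrm's drmIoctl() and an already
 * open i915 fd. Struct and flag names come from the i915 uAPI headers;
 * wrap_malloc_as_bo() is a hypothetical helper and error handling is elided.
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <xf86drm.h>
 *	#include <i915_drm.h>
 *
 *	static uint32_t wrap_malloc_as_bo(int fd, size_t sz)
 *	{
 *		struct drm_i915_gem_userptr arg = { 0 };
 *		void *ptr;
 *
 *		// Restriction 1: both ptr and size must be page aligned.
 *		if (posix_memalign(&ptr, 4096, sz))
 *			return 0;
 *
 *		arg.user_ptr = (uintptr_t)ptr;
 *		arg.user_size = sz;
 *		arg.flags = 0;
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *			return 0;
 *
 *		// arg.handle is now a GEM handle backed by the malloc()ed pages.
 *		return arg.handle;
 *	}
 */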
815
816int
817i915_gem_init_userptr(struct drm_device *dev)
818{
819 struct drm_i915_private *dev_priv = to_i915(dev);
820 mutex_init(&dev_priv->mm_lock);
821 hash_init(dev_priv->mm_structs);
822 return 0;
823}
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <drm/drmP.h>
26#include <drm/i915_drm.h>
27#include "i915_drv.h"
28#include "i915_trace.h"
29#include "intel_drv.h"
30#include <linux/mmu_context.h>
31#include <linux/mmu_notifier.h>
32#include <linux/mempolicy.h>
33#include <linux/swap.h>
34
35struct i915_mm_struct {
36 struct mm_struct *mm;
37 struct drm_i915_private *i915;
38 struct i915_mmu_notifier *mn;
39 struct hlist_node node;
40 struct kref kref;
41 struct work_struct work;
42};
43
44#if defined(CONFIG_MMU_NOTIFIER)
45#include <linux/interval_tree.h>
46
47struct i915_mmu_notifier {
48 spinlock_t lock;
49 struct hlist_node node;
50 struct mmu_notifier mn;
51 struct rb_root objects;
52 struct workqueue_struct *wq;
53};
54
55struct i915_mmu_object {
56 struct i915_mmu_notifier *mn;
57 struct drm_i915_gem_object *obj;
58 struct interval_tree_node it;
59 struct list_head link;
60 struct work_struct work;
61 bool attached;
62};
63
64static void cancel_userptr(struct work_struct *work)
65{
66 struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
67 struct drm_i915_gem_object *obj = mo->obj;
68 struct drm_device *dev = obj->base.dev;
69
70 i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
71
72 mutex_lock(&dev->struct_mutex);
73 /* Cancel any active worker and force us to re-evaluate gup */
74 obj->userptr.work = NULL;
75
76 /* We are inside a kthread context and can't be interrupted */
77 if (i915_gem_object_unbind(obj) == 0)
78 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
79 WARN_ONCE(obj->mm.pages,
80 "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
81 obj->bind_count,
82 atomic_read(&obj->mm.pages_pin_count),
83 obj->pin_display);
84
85 i915_gem_object_put(obj);
86 mutex_unlock(&dev->struct_mutex);
87}
88
89static void add_object(struct i915_mmu_object *mo)
90{
91 if (mo->attached)
92 return;
93
94 interval_tree_insert(&mo->it, &mo->mn->objects);
95 mo->attached = true;
96}
97
98static void del_object(struct i915_mmu_object *mo)
99{
100 if (!mo->attached)
101 return;
102
103 interval_tree_remove(&mo->it, &mo->mn->objects);
104 mo->attached = false;
105}
106
107static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
108 struct mm_struct *mm,
109 unsigned long start,
110 unsigned long end)
111{
112 struct i915_mmu_notifier *mn =
113 container_of(_mn, struct i915_mmu_notifier, mn);
114 struct i915_mmu_object *mo;
115 struct interval_tree_node *it;
116 LIST_HEAD(cancelled);
117
118 if (RB_EMPTY_ROOT(&mn->objects))
119 return;
120
121 /* interval ranges are inclusive, but invalidate range is exclusive */
122 end--;
123
124 spin_lock(&mn->lock);
125 it = interval_tree_iter_first(&mn->objects, start, end);
126 while (it) {
127 /* The mmu_object is released late when destroying the
128 * GEM object so it is entirely possible to gain a
129 * reference on an object in the process of being freed
130 * since our serialisation is via the spinlock and not
131 * the struct_mutex - and consequently use it after it
132 * is freed and then double free it. To prevent that
133 * use-after-free we only acquire a reference on the
134 * object if it is not in the process of being destroyed.
135 */
136 mo = container_of(it, struct i915_mmu_object, it);
137 if (kref_get_unless_zero(&mo->obj->base.refcount))
138 queue_work(mn->wq, &mo->work);
139
140 list_add(&mo->link, &cancelled);
141 it = interval_tree_iter_next(it, start, end);
142 }
143 list_for_each_entry(mo, &cancelled, link)
144 del_object(mo);
145 spin_unlock(&mn->lock);
146
147 flush_workqueue(mn->wq);
148}
149
150static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
151 .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
152};
153
154static struct i915_mmu_notifier *
155i915_mmu_notifier_create(struct mm_struct *mm)
156{
157 struct i915_mmu_notifier *mn;
158 int ret;
159
160 mn = kmalloc(sizeof(*mn), GFP_KERNEL);
161 if (mn == NULL)
162 return ERR_PTR(-ENOMEM);
163
164 spin_lock_init(&mn->lock);
165 mn->mn.ops = &i915_gem_userptr_notifier;
166 mn->objects = RB_ROOT;
167 mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
168 if (mn->wq == NULL) {
169 kfree(mn);
170 return ERR_PTR(-ENOMEM);
171 }
172
173 /* Protected by mmap_sem (write-lock) */
174 ret = __mmu_notifier_register(&mn->mn, mm);
175 if (ret) {
176 destroy_workqueue(mn->wq);
177 kfree(mn);
178 return ERR_PTR(ret);
179 }
180
181 return mn;
182}
183
184static void
185i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
186{
187 struct i915_mmu_object *mo;
188
189 mo = obj->userptr.mmu_object;
190 if (mo == NULL)
191 return;
192
193 spin_lock(&mo->mn->lock);
194 del_object(mo);
195 spin_unlock(&mo->mn->lock);
196 kfree(mo);
197
198 obj->userptr.mmu_object = NULL;
199}
200
201static struct i915_mmu_notifier *
202i915_mmu_notifier_find(struct i915_mm_struct *mm)
203{
204 struct i915_mmu_notifier *mn = mm->mn;
205
206 mn = mm->mn;
207 if (mn)
208 return mn;
209
210 down_write(&mm->mm->mmap_sem);
211 mutex_lock(&mm->i915->mm_lock);
212 if ((mn = mm->mn) == NULL) {
213 mn = i915_mmu_notifier_create(mm->mm);
214 if (!IS_ERR(mn))
215 mm->mn = mn;
216 }
217 mutex_unlock(&mm->i915->mm_lock);
218 up_write(&mm->mm->mmap_sem);
219
220 return mn;
221}
222
223static int
224i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
225 unsigned flags)
226{
227 struct i915_mmu_notifier *mn;
228 struct i915_mmu_object *mo;
229
230 if (flags & I915_USERPTR_UNSYNCHRONIZED)
231 return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
232
233 if (WARN_ON(obj->userptr.mm == NULL))
234 return -EINVAL;
235
236 mn = i915_mmu_notifier_find(obj->userptr.mm);
237 if (IS_ERR(mn))
238 return PTR_ERR(mn);
239
240 mo = kzalloc(sizeof(*mo), GFP_KERNEL);
241 if (mo == NULL)
242 return -ENOMEM;
243
244 mo->mn = mn;
245 mo->obj = obj;
246 mo->it.start = obj->userptr.ptr;
247 mo->it.last = obj->userptr.ptr + obj->base.size - 1;
248 INIT_WORK(&mo->work, cancel_userptr);
249
250 obj->userptr.mmu_object = mo;
251 return 0;
252}
253
254static void
255i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
256 struct mm_struct *mm)
257{
258 if (mn == NULL)
259 return;
260
261 mmu_notifier_unregister(&mn->mn, mm);
262 destroy_workqueue(mn->wq);
263 kfree(mn);
264}
265
266#else
267
268static void
269i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
270{
271}
272
273static int
274i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
275 unsigned flags)
276{
277 if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
278 return -ENODEV;
279
280 if (!capable(CAP_SYS_ADMIN))
281 return -EPERM;
282
283 return 0;
284}
285
286static void
287i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
288 struct mm_struct *mm)
289{
290}
291
292#endif
293
294static struct i915_mm_struct *
295__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
296{
297 struct i915_mm_struct *mm;
298
299 /* Protected by dev_priv->mm_lock */
300 hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
301 if (mm->mm == real)
302 return mm;
303
304 return NULL;
305}
306
307static int
308i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
309{
310 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
311 struct i915_mm_struct *mm;
312 int ret = 0;
313
314 /* During release of the GEM object we hold the struct_mutex. This
315 * precludes us from calling mmput() at that time as that may be
316 * the last reference and so call exit_mmap(). exit_mmap() will
317 * attempt to reap the vma, and if we were holding a GTT mmap
318 * would then call drm_gem_vm_close() and attempt to reacquire
319 * the struct mutex. So in order to avoid that recursion, we have
320 * to defer releasing the mm reference until after we drop the
321 * struct_mutex, i.e. we need to schedule a worker to do the clean
322 * up.
323 */
324 mutex_lock(&dev_priv->mm_lock);
325 mm = __i915_mm_struct_find(dev_priv, current->mm);
326 if (mm == NULL) {
327 mm = kmalloc(sizeof(*mm), GFP_KERNEL);
328 if (mm == NULL) {
329 ret = -ENOMEM;
330 goto out;
331 }
332
333 kref_init(&mm->kref);
334 mm->i915 = to_i915(obj->base.dev);
335
336 mm->mm = current->mm;
337 atomic_inc(&current->mm->mm_count);
338
339 mm->mn = NULL;
340
341 /* Protected by dev_priv->mm_lock */
342 hash_add(dev_priv->mm_structs,
343 &mm->node, (unsigned long)mm->mm);
344 } else
345 kref_get(&mm->kref);
346
347 obj->userptr.mm = mm;
348out:
349 mutex_unlock(&dev_priv->mm_lock);
350 return ret;
351}
352
353static void
354__i915_mm_struct_free__worker(struct work_struct *work)
355{
356 struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
357 i915_mmu_notifier_free(mm->mn, mm->mm);
358 mmdrop(mm->mm);
359 kfree(mm);
360}
361
362static void
363__i915_mm_struct_free(struct kref *kref)
364{
365 struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
366
367 /* Protected by dev_priv->mm_lock */
368 hash_del(&mm->node);
369 mutex_unlock(&mm->i915->mm_lock);
370
371 INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
372 schedule_work(&mm->work);
373}
374
375static void
376i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
377{
378 if (obj->userptr.mm == NULL)
379 return;
380
381 kref_put_mutex(&obj->userptr.mm->kref,
382 __i915_mm_struct_free,
383 &to_i915(obj->base.dev)->mm_lock);
384 obj->userptr.mm = NULL;
385}
386
387struct get_pages_work {
388 struct work_struct work;
389 struct drm_i915_gem_object *obj;
390 struct task_struct *task;
391};
392
393#if IS_ENABLED(CONFIG_SWIOTLB)
394#define swiotlb_active() swiotlb_nr_tbl()
395#else
396#define swiotlb_active() 0
397#endif
398
399static int
400st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
401{
402 struct scatterlist *sg;
403 int ret, n;
404
405 *st = kmalloc(sizeof(**st), GFP_KERNEL);
406 if (*st == NULL)
407 return -ENOMEM;
408
409 if (swiotlb_active()) {
410 ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
411 if (ret)
412 goto err;
413
414 for_each_sg((*st)->sgl, sg, num_pages, n)
415 sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
416 } else {
417 ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
418 0, num_pages << PAGE_SHIFT,
419 GFP_KERNEL);
420 if (ret)
421 goto err;
422 }
423
424 return 0;
425
426err:
427 kfree(*st);
428 *st = NULL;
429 return ret;
430}
431
432static struct sg_table *
433__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
434 struct page **pvec, int num_pages)
435{
436 struct sg_table *pages;
437 int ret;
438
439 ret = st_set_pages(&pages, pvec, num_pages);
440 if (ret)
441 return ERR_PTR(ret);
442
443 ret = i915_gem_gtt_prepare_pages(obj, pages);
444 if (ret) {
445 sg_free_table(pages);
446 kfree(pages);
447 return ERR_PTR(ret);
448 }
449
450 return pages;
451}
452
453static int
454__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
455 bool value)
456{
457 int ret = 0;
458
459 /* During mm_invalidate_range we need to cancel any userptr that
460 * overlaps the range being invalidated. Doing so requires the
461 * struct_mutex, and that risks recursion. In order to cause
462 * recursion, the user must alias the userptr address space with
463 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
464 * to invalidate that mmapping, mm_invalidate_range is called with
465 * the userptr address *and* the struct_mutex held. To prevent that
466 * we set a flag under the i915_mmu_notifier spinlock to indicate
467 * whether this object is valid.
468 */
469#if defined(CONFIG_MMU_NOTIFIER)
470 if (obj->userptr.mmu_object == NULL)
471 return 0;
472
473 spin_lock(&obj->userptr.mmu_object->mn->lock);
474 /* In order to serialise get_pages with an outstanding
475 * cancel_userptr, we must drop the struct_mutex and try again.
476 */
477 if (!value)
478 del_object(obj->userptr.mmu_object);
479 else if (!work_pending(&obj->userptr.mmu_object->work))
480 add_object(obj->userptr.mmu_object);
481 else
482 ret = -EAGAIN;
483 spin_unlock(&obj->userptr.mmu_object->mn->lock);
484#endif
485
486 return ret;
487}
488
489static void
490__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
491{
492 struct get_pages_work *work = container_of(_work, typeof(*work), work);
493 struct drm_i915_gem_object *obj = work->obj;
494 const int npages = obj->base.size >> PAGE_SHIFT;
495 struct page **pvec;
496 int pinned, ret;
497
498 ret = -ENOMEM;
499 pinned = 0;
500
501 pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
502 if (pvec != NULL) {
503 struct mm_struct *mm = obj->userptr.mm->mm;
504 unsigned int flags = 0;
505
506 if (!obj->userptr.read_only)
507 flags |= FOLL_WRITE;
508
509 ret = -EFAULT;
510 if (atomic_inc_not_zero(&mm->mm_users)) {
511 down_read(&mm->mmap_sem);
512 while (pinned < npages) {
513 ret = get_user_pages_remote
514 (work->task, mm,
515 obj->userptr.ptr + pinned * PAGE_SIZE,
516 npages - pinned,
517 flags,
518 pvec + pinned, NULL, NULL);
519 if (ret < 0)
520 break;
521
522 pinned += ret;
523 }
524 up_read(&mm->mmap_sem);
525 mmput(mm);
526 }
527 }
528
529 mutex_lock(&obj->mm.lock);
530 if (obj->userptr.work == &work->work) {
531 struct sg_table *pages = ERR_PTR(ret);
532
533 if (pinned == npages) {
534 pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
535 if (!IS_ERR(pages)) {
536 __i915_gem_object_set_pages(obj, pages);
537 pinned = 0;
538 pages = NULL;
539 }
540 }
541
542 obj->userptr.work = ERR_CAST(pages);
543 }
544 mutex_unlock(&obj->mm.lock);
545
546 release_pages(pvec, pinned, 0);
547 drm_free_large(pvec);
548
549 i915_gem_object_put(obj);
550 put_task_struct(work->task);
551 kfree(work);
552}
553
554static struct sg_table *
555__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
556 bool *active)
557{
558 struct get_pages_work *work;
559
560 /* Spawn a worker so that we can acquire the
561 * user pages without holding our mutex. Access
562 * to the user pages requires mmap_sem, and we have
563 * a strict lock ordering of mmap_sem, struct_mutex -
564 * we already hold struct_mutex here and so cannot
565 * call gup without encountering a lock inversion.
566 *
567 * Userspace will keep on repeating the operation
568 * (thanks to EAGAIN) until either we hit the fast
569 * path or the worker completes. If the worker is
570 * cancelled or superseded, the task is still run
571 * but the results ignored. (This leads to
572 * complications that we may have a stray object
573 * refcount that we need to be wary of when
574 * checking for existing objects during creation.)
575 * If the worker encounters an error, it reports
576 * that error back to this function through
577 * obj->userptr.work = ERR_PTR.
578 */
579 work = kmalloc(sizeof(*work), GFP_KERNEL);
580 if (work == NULL)
581 return ERR_PTR(-ENOMEM);
582
583 obj->userptr.work = &work->work;
584
585 work->obj = i915_gem_object_get(obj);
586
587 work->task = current;
588 get_task_struct(work->task);
589
590 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
591 schedule_work(&work->work);
592
593 *active = true;
594 return ERR_PTR(-EAGAIN);
595}
596
597static struct sg_table *
598i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
599{
600 const int num_pages = obj->base.size >> PAGE_SHIFT;
601 struct page **pvec;
602 struct sg_table *pages;
603 int pinned, ret;
604 bool active;
605
606 /* If userspace should engineer that these pages are replaced in
607 * the vma between us binding this page into the GTT and completion
608 * of rendering... Their loss. If they change the mapping of their
609 * pages they need to create a new bo to point to the new vma.
610 *
611 * However, that still leaves open the possibility of the vma
612 * being copied upon fork. Which falls under the same userspace
613 * synchronisation issue as a regular bo, except that this time
614 * the process may not be expecting that a particular piece of
615 * memory is tied to the GPU.
616 *
617 * Fortunately, we can hook into the mmu_notifier in order to
618 * discard the page references prior to anything nasty happening
619 * to the vma (discard or cloning) which should prevent the more
620 * egregious cases from causing harm.
621 */
622
623 if (obj->userptr.work) {
624 /* active flag should still be held for the pending work */
625 if (IS_ERR(obj->userptr.work))
626 return ERR_CAST(obj->userptr.work);
627 else
628 return ERR_PTR(-EAGAIN);
629 }
630
631 /* Let the mmu-notifier know that we have begun and need cancellation */
632 ret = __i915_gem_userptr_set_active(obj, true);
633 if (ret)
634 return ERR_PTR(ret);
635
636 pvec = NULL;
637 pinned = 0;
638 if (obj->userptr.mm->mm == current->mm) {
639 pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
640 GFP_TEMPORARY);
641 if (pvec == NULL) {
642 __i915_gem_userptr_set_active(obj, false);
643 return ERR_PTR(-ENOMEM);
644 }
645
646 pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
647 !obj->userptr.read_only, pvec);
648 }
649
650 active = false;
651 if (pinned < 0)
652 pages = ERR_PTR(pinned), pinned = 0;
653 else if (pinned < num_pages)
654 pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
655 else
656 pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
657 if (IS_ERR(pages)) {
658 __i915_gem_userptr_set_active(obj, active);
659 release_pages(pvec, pinned, 0);
660 }
661 drm_free_large(pvec);
662 return pages;
663}
664
665static void
666i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
667 struct sg_table *pages)
668{
669 struct sgt_iter sgt_iter;
670 struct page *page;
671
672 BUG_ON(obj->userptr.work != NULL);
673 __i915_gem_userptr_set_active(obj, false);
674
675 if (obj->mm.madv != I915_MADV_WILLNEED)
676 obj->mm.dirty = false;
677
678 i915_gem_gtt_finish_pages(obj, pages);
679
680 for_each_sgt_page(page, sgt_iter, pages) {
681 if (obj->mm.dirty)
682 set_page_dirty(page);
683
684 mark_page_accessed(page);
685 put_page(page);
686 }
687 obj->mm.dirty = false;
688
689 sg_free_table(pages);
690 kfree(pages);
691}
692
693static void
694i915_gem_userptr_release(struct drm_i915_gem_object *obj)
695{
696 i915_gem_userptr_release__mmu_notifier(obj);
697 i915_gem_userptr_release__mm_struct(obj);
698}
699
700static int
701i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
702{
703 if (obj->userptr.mmu_object)
704 return 0;
705
706 return i915_gem_userptr_init__mmu_notifier(obj, 0);
707}
708
709static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
710 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
711 I915_GEM_OBJECT_IS_SHRINKABLE,
712 .get_pages = i915_gem_userptr_get_pages,
713 .put_pages = i915_gem_userptr_put_pages,
714 .dmabuf_export = i915_gem_userptr_dmabuf_export,
715 .release = i915_gem_userptr_release,
716};
717
718/**
719 * Creates a new mm object that wraps some normal memory from the process
720 * context - user memory.
721 *
722 * We impose several restrictions upon the memory being mapped
723 * into the GPU.
724 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
725 * 2. It must be normal system memory, not a pointer into another map of IO
726 * space (e.g. it must not be a GTT mmapping of another object).
727 * 3. We only allow a bo as large as we could in theory map into the GTT,
728 * that is we limit the size to the total size of the GTT.
729 * 4. The bo is marked as being snoopable. The backing pages are left
730 * accessible directly by the CPU, but reads and writes by the GPU may
731 * incur the cost of a snoop (unless you have an LLC architecture).
732 *
733 * Synchronisation between multiple users and the GPU is left to userspace
734 * through the normal set-domain-ioctl. The kernel will enforce that the
735 * GPU relinquishes the VMA before it is returned back to the system
736 * i.e. upon free(), munmap() or process termination. However, the userspace
737 * malloc() library may not immediately relinquish the VMA after free() and
738 * instead reuse it whilst the GPU is still reading and writing to the VMA.
739 * Caveat emptor.
740 *
741 * Also note that the object created here is not currently a "first class"
742 * object, in that several ioctls are banned. These are the CPU access
743 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
744 * direct access via your pointer rather than use those ioctls. Another
745 * restriction is that we do not allow userptr surfaces to be pinned to the
746 * hardware and so we reject any attempt to create a framebuffer out of a
747 * userptr.
748 *
749 * If you think this is a good interface to use to pass GPU memory between
750 * drivers, please use dma-buf instead. In fact, wherever possible use
751 * dma-buf instead.
752 */
753int
754i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
755{
756 struct drm_i915_private *dev_priv = to_i915(dev);
757 struct drm_i915_gem_userptr *args = data;
758 struct drm_i915_gem_object *obj;
759 int ret;
760 u32 handle;
761
762 if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
763 /* We cannot support coherent userptr objects on hw without
764 * LLC and broken snooping.
765 */
766 return -ENODEV;
767 }
768
769 if (args->flags & ~(I915_USERPTR_READ_ONLY |
770 I915_USERPTR_UNSYNCHRONIZED))
771 return -EINVAL;
772
773 if (offset_in_page(args->user_ptr | args->user_size))
774 return -EINVAL;
775
776 if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
777 (char __user *)(unsigned long)args->user_ptr, args->user_size))
778 return -EFAULT;
779
780 if (args->flags & I915_USERPTR_READ_ONLY) {
781 /* On almost all of the current hw, we cannot tell the GPU that a
782 * page is readonly, so this is just a placeholder in the uAPI.
783 */
784 return -ENODEV;
785 }
786
787 obj = i915_gem_object_alloc(dev);
788 if (obj == NULL)
789 return -ENOMEM;
790
791 drm_gem_private_object_init(dev, &obj->base, args->user_size);
792 i915_gem_object_init(obj, &i915_gem_userptr_ops);
793 obj->cache_level = I915_CACHE_LLC;
794 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
795 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
796
797 obj->userptr.ptr = args->user_ptr;
798 obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
799
800 /* And keep a pointer to the current->mm for resolving the user pages
801 * at binding. This means that we need to hook into the mmu_notifier
802 * in order to detect if the mmu is destroyed.
803 */
804 ret = i915_gem_userptr_init__mm_struct(obj);
805 if (ret == 0)
806 ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
807 if (ret == 0)
808 ret = drm_gem_handle_create(file, &obj->base, &handle);
809
810 /* drop reference from allocate - handle holds it now */
811 i915_gem_object_put(obj);
812 if (ret)
813 return ret;
814
815 args->handle = handle;
816 return 0;
817}
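/*
 * Illustrative usage, not part of the driver: since this version rejects
 * userptr outright on hardware without LLC or snooping (and always rejects
 * I915_USERPTR_READ_ONLY), userspace can probe support by attempting the
 * ioctl on a scratch page and checking for failure, roughly as sketched
 * below. userptr_supported() is a hypothetical helper assuming libdrm.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <xf86drm.h>
 *	#include <i915_drm.h>
 *
 *	static bool userptr_supported(int fd)
 *	{
 *		struct drm_i915_gem_userptr arg = { 0 };
 *		struct drm_gem_close close_arg = { 0 };
 *		void *scratch;
 *
 *		if (posix_memalign(&scratch, 4096, 4096))
 *			return false;
 *
 *		arg.user_ptr = (uintptr_t)scratch;
 *		arg.user_size = 4096;
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg)) {
 *			free(scratch);
 *			return false;	// e.g. ENODEV on hw without LLC/snooping
 *		}
 *
 *		// Clean up the probe object before reporting success.
 *		close_arg.handle = arg.handle;
 *		drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
 *		free(scratch);
 *		return true;
 *	}
 */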
818
819void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
820{
821 mutex_init(&dev_priv->mm_lock);
822 hash_init(dev_priv->mm_structs);
823}