/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

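/*
 * One i915_mm_struct is kept per client mm_struct. It holds an mmgrab()
 * reference on the mm, owns the optional i915_mmu_notifier for that address
 * space, and is itself kref-counted by the userptr objects sharing it.
 */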
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

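/*
 * Work item queued by the mmu_notifier when a range covering this object is
 * invalidated: cancel any in-flight get-pages worker, and if none was
 * pending, wait for the GPU and unbind the object so its pages are released.
 */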
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}

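/*
 * Attach/detach the object's node in the notifier's interval tree;
 * both helpers are called with mn->lock held.
 */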
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->wq = alloc_workqueue("i915-userptr-release",
				 WQ_UNBOUND | WQ_MEM_RECLAIM,
				 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm->mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn)) {
		destroy_workqueue(mn->wq);
		kfree(mn);
	}

	return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

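/* Look up the i915_mm_struct wrapping @real; caller holds dev_priv->mm_lock. */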
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

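/*
 * Build a scatterlist from the pinned pages and install it as the object's
 * backing store. If DMA mapping fails with large segments, retry with
 * single-page segments before giving up.
 */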
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, int num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

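/*
 * Slow path, run from the userptr acquire workqueue: pin the pages with
 * get_user_pages_remote() under the target mm's mmap_sem and, unless the
 * request has been cancelled or superseded, install them on the object.
 */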
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

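/*
 * get_pages backend for userptr objects: try the fast gup path when called
 * from the owning process, otherwise defer to the worker above and return
 * -EAGAIN so the caller retries once the pages have been acquired.
 */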
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !obj->userptr.read_only,
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
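/*
 * Illustrative userspace sketch (not part of this file) of driving this
 * ioctl through libdrm, assuming "fd" is an open i915 DRM fd and "ptr"/"size"
 * describe a page-aligned allocation; struct drm_i915_gem_userptr and
 * DRM_IOCTL_I915_GEM_USERPTR are defined in include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;
 *
 * On success, arg.handle names a GEM object backed by the supplied pages.
 */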
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}