1/*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <drm/drm_vma_manager.h>
29#include <linux/dma-fence-array.h>
30#include <linux/kthread.h>
31#include <linux/dma-resv.h>
32#include <linux/shmem_fs.h>
33#include <linux/slab.h>
34#include <linux/stop_machine.h>
35#include <linux/swap.h>
36#include <linux/pci.h>
37#include <linux/dma-buf.h>
38#include <linux/mman.h>
39
40#include "display/intel_display.h"
41#include "display/intel_frontbuffer.h"
42
43#include "gem/i915_gem_clflush.h"
44#include "gem/i915_gem_context.h"
45#include "gem/i915_gem_ioctls.h"
46#include "gem/i915_gem_mman.h"
47#include "gem/i915_gem_region.h"
48#include "gt/intel_engine_user.h"
49#include "gt/intel_gt.h"
50#include "gt/intel_gt_pm.h"
51#include "gt/intel_workarounds.h"
52
53#include "i915_drv.h"
54#include "i915_trace.h"
55#include "i915_vgpu.h"
56
57#include "intel_pm.h"
58
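/*
 * Reserve a drm_mm node inside the CPU-mappable range of the GGTT
 * (0..mappable_end), taking the vm mutex around the allocation;
 * remove_mappable_node() below is the matching teardown.
 */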
59static int
60insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
61{
62 int err;
63
64 err = mutex_lock_interruptible(&ggtt->vm.mutex);
65 if (err)
66 return err;
67
68 memset(node, 0, sizeof(*node));
69 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
70 size, 0, I915_COLOR_UNEVICTABLE,
71 0, ggtt->mappable_end,
72 DRM_MM_INSERT_LOW);
73
74 mutex_unlock(&ggtt->vm.mutex);
75
76 return err;
77}
78
79static void
80remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
81{
82 mutex_lock(&ggtt->vm.mutex);
83 drm_mm_remove_node(node);
84 mutex_unlock(&ggtt->vm.mutex);
85}
86
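/**
 * i915_gem_get_aperture_ioctl - Report the total and available GGTT aperture
 * @dev: drm device pointer
 * @data: ioctl data blob (struct drm_i915_gem_get_aperture)
 * @file: drm file pointer
 *
 * Available space is the total aperture minus the vm's reserved ranges and
 * the sizes of all currently pinned vmas.
 */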
87int
88i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
89 struct drm_file *file)
90{
91 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
92 struct drm_i915_gem_get_aperture *args = data;
93 struct i915_vma *vma;
94 u64 pinned;
95
96 if (mutex_lock_interruptible(&ggtt->vm.mutex))
97 return -EINTR;
98
99 pinned = ggtt->vm.reserved;
100 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
101 if (i915_vma_is_pinned(vma))
102 pinned += vma->node.size;
103
104 mutex_unlock(&ggtt->vm.mutex);
105
106 args->aper_size = ggtt->vm.total;
107 args->aper_available_size = args->aper_size - pinned;
108
109 return 0;
110}
111
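/* Unbind every vma of @obj, honouring the I915_GEM_OBJECT_UNBIND_* flags:
 * TEST merely reports -EBUSY if a bound vma exists, ACTIVE allows busy
 * vmas to be unbound, VM_TRYLOCK avoids sleeping on the vm mutex, and
 * BARRIER retries after flushing deferred vm frees.
 */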
112int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
113 unsigned long flags)
114{
115 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
116 LIST_HEAD(still_in_list);
117 intel_wakeref_t wakeref;
118 struct i915_vma *vma;
119 int ret;
120
121 if (list_empty(&obj->vma.list))
122 return 0;
123
124 /*
125 * As some machines use ACPI to handle runtime-resume callbacks, and
126 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
127 * as they are required by the shrinker. Ergo, we wake the device up
128 * first just in case.
129 */
130 wakeref = intel_runtime_pm_get(rpm);
131
132try_again:
133 ret = 0;
134 spin_lock(&obj->vma.lock);
135 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
136 struct i915_vma,
137 obj_link))) {
138 struct i915_address_space *vm = vma->vm;
139
140 list_move_tail(&vma->obj_link, &still_in_list);
141 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
142 continue;
143
144 if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
145 ret = -EBUSY;
146 break;
147 }
148
149 ret = -EAGAIN;
150 if (!i915_vm_tryopen(vm))
151 break;
152
153 /* Prevent vma being freed by i915_vma_parked as we unbind */
154 vma = __i915_vma_get(vma);
155 spin_unlock(&obj->vma.lock);
156
157 if (vma) {
158 ret = -EBUSY;
159 if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
160 !i915_vma_is_active(vma)) {
161 if (flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) {
162 if (mutex_trylock(&vma->vm->mutex)) {
163 ret = __i915_vma_unbind(vma);
164 mutex_unlock(&vma->vm->mutex);
165 } else {
166 ret = -EBUSY;
167 }
168 } else {
169 ret = i915_vma_unbind(vma);
170 }
171 }
172
173 __i915_vma_put(vma);
174 }
175
176 i915_vm_close(vm);
177 spin_lock(&obj->vma.lock);
178 }
179 list_splice_init(&still_in_list, &obj->vma.list);
180 spin_unlock(&obj->vma.lock);
181
182 if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
183 rcu_barrier(); /* flush the i915_vm_release() */
184 goto try_again;
185 }
186
187 intel_runtime_pm_put(rpm, wakeref);
188
189 return ret;
190}
191
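/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading from the page if
 * needs_clflush is set.
 */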
192static int
193shmem_pread(struct page *page, int offset, int len, char __user *user_data,
194 bool needs_clflush)
195{
196 char *vaddr;
197 int ret;
198
199 vaddr = kmap(page);
200
201 if (needs_clflush)
202 drm_clflush_virt_range(vaddr + offset, len);
203
204 ret = __copy_to_user(user_data, vaddr + offset, len);
205
206 kunmap(page);
207
208 return ret ? -EFAULT : 0;
209}
210
211static int
212i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
213 struct drm_i915_gem_pread *args)
214{
215 unsigned int needs_clflush;
216 unsigned int idx, offset;
217 char __user *user_data;
218 u64 remain;
219 int ret;
220
221 ret = i915_gem_object_lock_interruptible(obj, NULL);
222 if (ret)
223 return ret;
224
225 ret = i915_gem_object_pin_pages(obj);
226 if (ret)
227 goto err_unlock;
228
229 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
230 if (ret)
231 goto err_unpin;
232
233 i915_gem_object_finish_access(obj);
234 i915_gem_object_unlock(obj);
235
236 remain = args->size;
237 user_data = u64_to_user_ptr(args->data_ptr);
238 offset = offset_in_page(args->offset);
239 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
240 struct page *page = i915_gem_object_get_page(obj, idx);
241 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
242
243 ret = shmem_pread(page, offset, length, user_data,
244 needs_clflush);
245 if (ret)
246 break;
247
248 remain -= length;
249 user_data += length;
250 offset = 0;
251 }
252
253 i915_gem_object_unpin_pages(obj);
254 return ret;
255
256err_unpin:
257 i915_gem_object_unpin_pages(obj);
258err_unlock:
259 i915_gem_object_unlock(obj);
260 return ret;
261}
262
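/* Copy from a GGTT mapping to userspace: try the atomic WC mapping first
 * and fall back to a non-atomic mapping if the user buffer faults.
 */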
263static inline bool
264gtt_user_read(struct io_mapping *mapping,
265 loff_t base, int offset,
266 char __user *user_data, int length)
267{
268 void __iomem *vaddr;
269 unsigned long unwritten;
270
271 /* We can use the cpu mem copy function because this is X86. */
272 vaddr = io_mapping_map_atomic_wc(mapping, base);
273 unwritten = __copy_to_user_inatomic(user_data,
274 (void __force *)vaddr + offset,
275 length);
276 io_mapping_unmap_atomic(vaddr);
277 if (unwritten) {
278 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
279 unwritten = copy_to_user(user_data,
280 (void __force *)vaddr + offset,
281 length);
282 io_mapping_unmap(vaddr);
283 }
284 return unwritten;
285}
286
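/* Pin the object for CPU access through the mappable GGTT aperture,
 * falling back to a single-page scratch node (filled per page with
 * insert_page by the callers) when a full pin cannot be obtained.
 */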
287static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
288 struct drm_mm_node *node,
289 bool write)
290{
291 struct drm_i915_private *i915 = to_i915(obj->base.dev);
292 struct i915_ggtt *ggtt = &i915->ggtt;
293 struct i915_vma *vma;
294 struct i915_gem_ww_ctx ww;
295 int ret;
296
297 i915_gem_ww_ctx_init(&ww, true);
298retry:
299 vma = ERR_PTR(-ENODEV);
300 ret = i915_gem_object_lock(obj, &ww);
301 if (ret)
302 goto err_ww;
303
304 ret = i915_gem_object_set_to_gtt_domain(obj, write);
305 if (ret)
306 goto err_ww;
307
308 if (!i915_gem_object_is_tiled(obj))
309 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
310 PIN_MAPPABLE |
311 PIN_NONBLOCK /* NOWARN */ |
312 PIN_NOEVICT);
313 if (vma == ERR_PTR(-EDEADLK)) {
314 ret = -EDEADLK;
315 goto err_ww;
316 } else if (!IS_ERR(vma)) {
317 node->start = i915_ggtt_offset(vma);
318 node->flags = 0;
319 } else {
320 ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
321 if (ret)
322 goto err_ww;
323 GEM_BUG_ON(!drm_mm_node_allocated(node));
324 vma = NULL;
325 }
326
327 ret = i915_gem_object_pin_pages(obj);
328 if (ret) {
329 if (drm_mm_node_allocated(node)) {
330 ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
331 remove_mappable_node(ggtt, node);
332 } else {
333 i915_vma_unpin(vma);
334 }
335 }
336
337err_ww:
338 if (ret == -EDEADLK) {
339 ret = i915_gem_ww_ctx_backoff(&ww);
340 if (!ret)
341 goto retry;
342 }
343 i915_gem_ww_ctx_fini(&ww);
344
345 return ret ? ERR_PTR(ret) : vma;
346}
347
348static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
349 struct drm_mm_node *node,
350 struct i915_vma *vma)
351{
352 struct drm_i915_private *i915 = to_i915(obj->base.dev);
353 struct i915_ggtt *ggtt = &i915->ggtt;
354
355 i915_gem_object_unpin_pages(obj);
356 if (drm_mm_node_allocated(node)) {
357 ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
358 remove_mappable_node(ggtt, node);
359 } else {
360 i915_vma_unpin(vma);
361 }
362}
363
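/* Slow pread path: read the object one page at a time through the GGTT
 * aperture, used when the shmem path is unavailable or faults.
 */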
364static int
365i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
366 const struct drm_i915_gem_pread *args)
367{
368 struct drm_i915_private *i915 = to_i915(obj->base.dev);
369 struct i915_ggtt *ggtt = &i915->ggtt;
370 intel_wakeref_t wakeref;
371 struct drm_mm_node node;
372 void __user *user_data;
373 struct i915_vma *vma;
374 u64 remain, offset;
375 int ret = 0;
376
377 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
378
379 vma = i915_gem_gtt_prepare(obj, &node, false);
380 if (IS_ERR(vma)) {
381 ret = PTR_ERR(vma);
382 goto out_rpm;
383 }
384
385 user_data = u64_to_user_ptr(args->data_ptr);
386 remain = args->size;
387 offset = args->offset;
388
389 while (remain > 0) {
390 /* Operation in this page
391 *
392 * page_base = page offset within aperture
393 * page_offset = offset within page
394 * page_length = bytes to copy for this page
395 */
396 u32 page_base = node.start;
397 unsigned page_offset = offset_in_page(offset);
398 unsigned page_length = PAGE_SIZE - page_offset;
399 page_length = remain < page_length ? remain : page_length;
400 if (drm_mm_node_allocated(&node)) {
401 ggtt->vm.insert_page(&ggtt->vm,
402 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
403 node.start, I915_CACHE_NONE, 0);
404 } else {
405 page_base += offset & PAGE_MASK;
406 }
407
408 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
409 user_data, page_length)) {
410 ret = -EFAULT;
411 break;
412 }
413
414 remain -= page_length;
415 user_data += page_length;
416 offset += page_length;
417 }
418
419 i915_gem_gtt_cleanup(obj, &node, vma);
420out_rpm:
421 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
422 return ret;
423}
424
425/**
426 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
427 * @dev: drm device pointer
428 * @data: ioctl data blob
429 * @file: drm file pointer
430 *
431 * On error, the contents of *data are undefined.
432 */
433int
434i915_gem_pread_ioctl(struct drm_device *dev, void *data,
435 struct drm_file *file)
436{
437 struct drm_i915_private *i915 = to_i915(dev);
438 struct drm_i915_gem_pread *args = data;
439 struct drm_i915_gem_object *obj;
440 int ret;
441
442 /* PREAD is disallowed for all platforms after TGL-LP. This also
443 * covers all platforms with local memory.
444 */
445 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
446 return -EOPNOTSUPP;
447
448 if (args->size == 0)
449 return 0;
450
451 if (!access_ok(u64_to_user_ptr(args->data_ptr),
452 args->size))
453 return -EFAULT;
454
455 obj = i915_gem_object_lookup(file, args->handle);
456 if (!obj)
457 return -ENOENT;
458
459 /* Bounds check source. */
460 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
461 ret = -EINVAL;
462 goto out;
463 }
464
465 trace_i915_gem_object_pread(obj, args->offset, args->size);
466 ret = -ENODEV;
467 if (obj->ops->pread)
468 ret = obj->ops->pread(obj, args);
469 if (ret != -ENODEV)
470 goto out;
471
478 ret = i915_gem_object_wait(obj,
479 I915_WAIT_INTERRUPTIBLE,
480 MAX_SCHEDULE_TIMEOUT);
481 if (ret)
482 goto out;
483
484 ret = i915_gem_shmem_pread(obj, args);
485 if (ret == -EFAULT || ret == -ENODEV)
486 ret = i915_gem_gtt_pread(obj, args);
487
488out:
489 i915_gem_object_put(obj);
490 return ret;
491}
492
493/* This is the fast write path which cannot handle
494 * page faults in the source data
495 */
496
497static inline bool
498ggtt_write(struct io_mapping *mapping,
499 loff_t base, int offset,
500 char __user *user_data, int length)
501{
502 void __iomem *vaddr;
503 unsigned long unwritten;
504
505 /* We can use the cpu mem copy function because this is X86. */
506 vaddr = io_mapping_map_atomic_wc(mapping, base);
507 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
508 user_data, length);
509 io_mapping_unmap_atomic(vaddr);
510 if (unwritten) {
511 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
512 unwritten = copy_from_user((void __force *)vaddr + offset,
513 user_data, length);
514 io_mapping_unmap(vaddr);
515 }
516
517 return unwritten;
518}
519
520/**
521 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the
522 * data directly from the user into the GTT, uncached.
523 * @obj: i915 GEM object
524 * @args: pwrite arguments structure
525 */
526static int
527i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
528 const struct drm_i915_gem_pwrite *args)
529{
530 struct drm_i915_private *i915 = to_i915(obj->base.dev);
531 struct i915_ggtt *ggtt = &i915->ggtt;
532 struct intel_runtime_pm *rpm = &i915->runtime_pm;
533 intel_wakeref_t wakeref;
534 struct drm_mm_node node;
535 struct i915_vma *vma;
536 u64 remain, offset;
537 void __user *user_data;
538 int ret = 0;
539
540 if (i915_gem_object_has_struct_page(obj)) {
541 /*
542 * Avoid waking the device up if we can fallback, as
543 * waking/resuming is very slow (worst-case 10-100 ms
544 * depending on PCI sleeps and our own resume time).
545 * This easily dwarfs any performance advantage from
546 * using the cache bypass of indirect GGTT access.
547 */
548 wakeref = intel_runtime_pm_get_if_in_use(rpm);
549 if (!wakeref)
550 return -EFAULT;
551 } else {
552 /* No backing pages, no fallback, we must force GGTT access */
553 wakeref = intel_runtime_pm_get(rpm);
554 }
555
556 vma = i915_gem_gtt_prepare(obj, &node, true);
557 if (IS_ERR(vma)) {
558 ret = PTR_ERR(vma);
559 goto out_rpm;
560 }
561
562 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
563
564 user_data = u64_to_user_ptr(args->data_ptr);
565 offset = args->offset;
566 remain = args->size;
567 while (remain) {
568 /* Operation in this page
569 *
570 * page_base = page offset within aperture
571 * page_offset = offset within page
572 * page_length = bytes to copy for this page
573 */
574 u32 page_base = node.start;
575 unsigned int page_offset = offset_in_page(offset);
576 unsigned int page_length = PAGE_SIZE - page_offset;
577 page_length = remain < page_length ? remain : page_length;
578 if (drm_mm_node_allocated(&node)) {
579 /* flush the write before we modify the GGTT */
580 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
581 ggtt->vm.insert_page(&ggtt->vm,
582 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
583 node.start, I915_CACHE_NONE, 0);
584 wmb(); /* flush modifications to the GGTT (insert_page) */
585 } else {
586 page_base += offset & PAGE_MASK;
587 }
588 /* If we get a fault while copying data, then (presumably) our
589 * source page isn't available. Return the error and we'll
590 * retry in the slow path.
591 * If the object is non-shmem backed, we retry again with the
592 * path that handles page fault.
593 */
594 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
595 user_data, page_length)) {
596 ret = -EFAULT;
597 break;
598 }
599
600 remain -= page_length;
601 user_data += page_length;
602 offset += page_length;
603 }
604
605 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
606 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
607
608 i915_gem_gtt_cleanup(obj, &node, vma);
609out_rpm:
610 intel_runtime_pm_put(rpm, wakeref);
611 return ret;
612}
613
614/* Per-page copy function for the shmem pwrite fastpath.
615 * Flushes invalid cachelines before writing to the target if
616 * needs_clflush_before is set and flushes out any written cachelines after
617 * writing if needs_clflush is set.
618 */
619static int
620shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
621 bool needs_clflush_before,
622 bool needs_clflush_after)
623{
624 char *vaddr;
625 int ret;
626
627 vaddr = kmap(page);
628
629 if (needs_clflush_before)
630 drm_clflush_virt_range(vaddr + offset, len);
631
632 ret = __copy_from_user(vaddr + offset, user_data, len);
633 if (!ret && needs_clflush_after)
634 drm_clflush_virt_range(vaddr + offset, len);
635
636 kunmap(page);
637
638 return ret ? -EFAULT : 0;
639}
640
641static int
642i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
643 const struct drm_i915_gem_pwrite *args)
644{
645 unsigned int partial_cacheline_write;
646 unsigned int needs_clflush;
647 unsigned int offset, idx;
648 void __user *user_data;
649 u64 remain;
650 int ret;
651
652 ret = i915_gem_object_lock_interruptible(obj, NULL);
653 if (ret)
654 return ret;
655
656 ret = i915_gem_object_pin_pages(obj);
657 if (ret)
658 goto err_unlock;
659
660 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
661 if (ret)
662 goto err_unpin;
663
664 i915_gem_object_finish_access(obj);
665 i915_gem_object_unlock(obj);
666
667 /* If we don't overwrite a cacheline completely we need to be
668 * careful to have up-to-date data by first clflushing. Don't
669 * overcomplicate things and flush the entire page.
670 */
671 partial_cacheline_write = 0;
672 if (needs_clflush & CLFLUSH_BEFORE)
673 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
674
675 user_data = u64_to_user_ptr(args->data_ptr);
676 remain = args->size;
677 offset = offset_in_page(args->offset);
678 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
679 struct page *page = i915_gem_object_get_page(obj, idx);
680 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
681
682 ret = shmem_pwrite(page, offset, length, user_data,
683 (offset | length) & partial_cacheline_write,
684 needs_clflush & CLFLUSH_AFTER);
685 if (ret)
686 break;
687
688 remain -= length;
689 user_data += length;
690 offset = 0;
691 }
692
693 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
694
695 i915_gem_object_unpin_pages(obj);
696 return ret;
697
698err_unpin:
699 i915_gem_object_unpin_pages(obj);
700err_unlock:
701 i915_gem_object_unlock(obj);
702 return ret;
703}
704
705/**
706 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
707 * @dev: drm device
708 * @data: ioctl data blob
709 * @file: drm file
710 *
711 * On error, the contents of the buffer that were to be modified are undefined.
712 */
713int
714i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
715 struct drm_file *file)
716{
717 struct drm_i915_private *i915 = to_i915(dev);
718 struct drm_i915_gem_pwrite *args = data;
719 struct drm_i915_gem_object *obj;
720 int ret;
721
722 /* PWRITE is disallowed for all platforms after TGL-LP. This also
723 * covers all platforms with local memory.
724 */
725 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
726 return -EOPNOTSUPP;
727
728 if (args->size == 0)
729 return 0;
730
731 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
732 return -EFAULT;
733
734 obj = i915_gem_object_lookup(file, args->handle);
735 if (!obj)
736 return -ENOENT;
737
738 /* Bounds check destination. */
739 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
740 ret = -EINVAL;
741 goto err;
742 }
743
744 /* Writes not allowed into this read-only object */
745 if (i915_gem_object_is_readonly(obj)) {
746 ret = -EINVAL;
747 goto err;
748 }
749
750 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
751
752 ret = -ENODEV;
753 if (obj->ops->pwrite)
754 ret = obj->ops->pwrite(obj, args);
755 if (ret != -ENODEV)
756 goto err;
757
758 ret = i915_gem_object_wait(obj,
759 I915_WAIT_INTERRUPTIBLE |
760 I915_WAIT_ALL,
761 MAX_SCHEDULE_TIMEOUT);
762 if (ret)
763 goto err;
764
765 ret = -EFAULT;
766 /* We can only do the GTT pwrite on untiled buffers, as otherwise
767 * it would end up going through the fenced access, and we'll get
768 * different detiling behavior between reading and writing.
769 * pread/pwrite currently are reading and writing from the CPU
770 * perspective, requiring manual detiling by the client.
771 */
772 if (!i915_gem_object_has_struct_page(obj) ||
773 cpu_write_needs_clflush(obj))
774 /* Note that the gtt paths might fail with non-page-backed user
775 * pointers (e.g. gtt mappings when moving data between
776 * textures). Fallback to the shmem path in that case.
777 */
778 ret = i915_gem_gtt_pwrite_fast(obj, args);
779
780 if (ret == -EFAULT || ret == -ENOSPC) {
781 if (i915_gem_object_has_struct_page(obj))
782 ret = i915_gem_shmem_pwrite(obj, args);
783 }
784
785err:
786 i915_gem_object_put(obj);
787 return ret;
788}
789
790/**
791 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
792 * @dev: drm device
793 * @data: ioctl data blob
794 * @file: drm file
795 */
796int
797i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
798 struct drm_file *file)
799{
800 struct drm_i915_gem_sw_finish *args = data;
801 struct drm_i915_gem_object *obj;
802
803 obj = i915_gem_object_lookup(file, args->handle);
804 if (!obj)
805 return -ENOENT;
806
807 /*
808 * Proxy objects are barred from CPU access, so there is no
809 * need to ban sw_finish as it is a nop.
810 */
811
812 /* Pinned buffers may be scanout, so flush the cache */
813 i915_gem_object_flush_if_display(obj);
814 i915_gem_object_put(obj);
815
816 return 0;
817}
818
819void i915_gem_runtime_suspend(struct drm_i915_private *i915)
820{
821 struct drm_i915_gem_object *obj, *on;
822 int i;
823
824 /*
825 * Only called during RPM suspend. All users of the userfault_list
826 * must be holding an RPM wakeref to ensure that this can not
827 * run concurrently with themselves (and use the struct_mutex for
828 * protection between themselves).
829 */
830
831 list_for_each_entry_safe(obj, on,
832 &i915->ggtt.userfault_list, userfault_link)
833 __i915_gem_object_release_mmap_gtt(obj);
834
835 /*
836 * The fence will be lost when the device powers down. If any were
837 * in use by hardware (i.e. they are pinned), we should not be powering
838 * down! All other fences will be reacquired by the user upon waking.
839 */
840 for (i = 0; i < i915->ggtt.num_fences; i++) {
841 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
842
843 /*
844 * Ideally we want to assert that the fence register is not
845 * live at this point (i.e. that no piece of code will be
846 * trying to write through fence + GTT, as that both violates
847 * our tracking of activity and associated locking/barriers,
848 * but also is illegal given that the hw is powered down).
849 *
850 * Previously we used reg->pin_count as a "liveness" indicator.
851 * That is not sufficient, and we need a more fine-grained
852 * tool if we want to have a sanity check here.
853 */
854
855 if (!reg->vma)
856 continue;
857
858 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
859 reg->dirty = true;
860 }
861}
862
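/* Drop a stale ggtt vma from the object's vma tree so that a fresh
 * instance can be created in its place.
 */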
863static void discard_ggtt_vma(struct i915_vma *vma)
864{
865 struct drm_i915_gem_object *obj = vma->obj;
866
867 spin_lock(&obj->vma.lock);
868 if (!RB_EMPTY_NODE(&vma->obj_node)) {
869 rb_erase(&vma->obj_node, &obj->vma.tree);
870 RB_CLEAR_NODE(&vma->obj_node);
871 }
872 spin_unlock(&obj->vma.lock);
873}
874
875struct i915_vma *
876i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
877 struct i915_gem_ww_ctx *ww,
878 const struct i915_ggtt_view *view,
879 u64 size, u64 alignment, u64 flags)
880{
881 struct drm_i915_private *i915 = to_i915(obj->base.dev);
882 struct i915_ggtt *ggtt = &i915->ggtt;
883 struct i915_vma *vma;
884 int ret;
885
886 if (flags & PIN_MAPPABLE &&
887 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
888 /*
889 * If the required space is larger than the available
890 * aperture, we will not be able to find a slot for the
891 * object and unbinding the object now will be in
892 * vain. Worse, doing so may cause us to ping-pong
893 * the object in and out of the Global GTT and
894 * waste a lot of cycles under the mutex.
895 */
896 if (obj->base.size > ggtt->mappable_end)
897 return ERR_PTR(-E2BIG);
898
899 /*
900 * If NONBLOCK is set the caller is optimistically
901 * trying to cache the full object within the mappable
902 * aperture, and *must* have a fallback in place for
903 * situations where we cannot bind the object. We
904 * can be a little more lax here and use the fallback
905 * more often to avoid costly migrations of ourselves
906 * and other objects within the aperture.
907 *
908 * Half-the-aperture is used as a simple heuristic.
909 * More interesting would be to search for a free
910 * block prior to making the commitment to unbind.
911 * That caters for the self-harm case, and with a
912 * little more heuristics (e.g. NOFAULT, NOEVICT)
913 * we could try to minimise harm to others.
914 */
915 if (flags & PIN_NONBLOCK &&
916 obj->base.size > ggtt->mappable_end / 2)
917 return ERR_PTR(-ENOSPC);
918 }
919
920new_vma:
921 vma = i915_vma_instance(obj, &ggtt->vm, view);
922 if (IS_ERR(vma))
923 return vma;
924
925 if (i915_vma_misplaced(vma, size, alignment, flags)) {
926 if (flags & PIN_NONBLOCK) {
927 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
928 return ERR_PTR(-ENOSPC);
929
930 if (flags & PIN_MAPPABLE &&
931 vma->fence_size > ggtt->mappable_end / 2)
932 return ERR_PTR(-ENOSPC);
933 }
934
935 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
936 discard_ggtt_vma(vma);
937 goto new_vma;
938 }
939
940 ret = i915_vma_unbind(vma);
941 if (ret)
942 return ERR_PTR(ret);
943 }
944
945 if (ww)
946 ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
947 else
948 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
949
950 if (ret)
951 return ERR_PTR(ret);
952
953 if (vma->fence && !i915_gem_object_is_tiled(obj)) {
954 mutex_lock(&ggtt->vm.mutex);
955 i915_vma_revoke_fence(vma);
956 mutex_unlock(&ggtt->vm.mutex);
957 }
958
959 ret = i915_vma_wait_for_bind(vma);
960 if (ret) {
961 i915_vma_unpin(vma);
962 return ERR_PTR(ret);
963 }
964
965 return vma;
966}
967
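/**
 * i915_gem_madvise_ioctl - Update the DONTNEED/WILLNEED hint of an object
 * @dev: drm device
 * @data: ioctl data blob (struct drm_i915_gem_madvise)
 * @file_priv: drm file
 *
 * Objects marked DONTNEED may have their backing storage discarded under
 * memory pressure; the ioctl reports back, in the retained field, whether
 * the backing store is still present.
 */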
968int
969i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
970 struct drm_file *file_priv)
971{
972 struct drm_i915_private *i915 = to_i915(dev);
973 struct drm_i915_gem_madvise *args = data;
974 struct drm_i915_gem_object *obj;
975 int err;
976
977 switch (args->madv) {
978 case I915_MADV_DONTNEED:
979 case I915_MADV_WILLNEED:
980 break;
981 default:
982 return -EINVAL;
983 }
984
985 obj = i915_gem_object_lookup(file_priv, args->handle);
986 if (!obj)
987 return -ENOENT;
988
989 err = i915_gem_object_lock_interruptible(obj, NULL);
990 if (err)
991 goto out;
992
993 if (i915_gem_object_has_pages(obj) &&
994 i915_gem_object_is_tiled(obj) &&
995 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
996 if (obj->mm.madv == I915_MADV_WILLNEED) {
997 GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
998 i915_gem_object_clear_tiling_quirk(obj);
999 i915_gem_object_make_shrinkable(obj);
1000 }
1001 if (args->madv == I915_MADV_WILLNEED) {
1002 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
1003 i915_gem_object_make_unshrinkable(obj);
1004 i915_gem_object_set_tiling_quirk(obj);
1005 }
1006 }
1007
1008 if (obj->mm.madv != __I915_MADV_PURGED)
1009 obj->mm.madv = args->madv;
1010
1011 if (i915_gem_object_has_pages(obj)) {
1012 unsigned long flags;
1013
1014 spin_lock_irqsave(&i915->mm.obj_lock, flags);
1015 if (!list_empty(&obj->mm.link)) {
1016 struct list_head *list;
1017
1018 if (obj->mm.madv != I915_MADV_WILLNEED)
1019 list = &i915->mm.purge_list;
1020 else
1021 list = &i915->mm.shrink_list;
1022 list_move_tail(&obj->mm.link, list);
1023
1024 }
1025 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1026 }
1027
1028 /* if the object is no longer attached, discard its backing storage */
1029 if (obj->mm.madv == I915_MADV_DONTNEED &&
1030 !i915_gem_object_has_pages(obj))
1031 i915_gem_object_truncate(obj);
1032
1033 args->retained = obj->mm.madv != __I915_MADV_PURGED;
1034
1035 i915_gem_object_unlock(obj);
1036out:
1037 i915_gem_object_put(obj);
1038 return err;
1039}
1040
1041int i915_gem_init(struct drm_i915_private *dev_priv)
1042{
1043 int ret;
1044
1045 /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
1046 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1047 mkwrite_device_info(dev_priv)->page_sizes =
1048 I915_GTT_PAGE_SIZE_4K;
1049
1050 ret = i915_gem_init_userptr(dev_priv);
1051 if (ret)
1052 return ret;
1053
1054 intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1055 intel_wopcm_init(&dev_priv->wopcm);
1056
1057 ret = i915_init_ggtt(dev_priv);
1058 if (ret) {
1059 GEM_BUG_ON(ret == -EIO);
1060 goto err_unlock;
1061 }
1062
1063 /*
1064 * Despite its name, intel_init_clock_gating applies both display
1065 * clock gating workarounds and GT mmio workarounds, plus the occasional
1066 * GT power context workaround. Worse, sometimes it includes a context
1067 * register workaround which we need to apply before we record the
1068 * default HW state for all contexts.
1069 *
1070 * FIXME: break up the workarounds and apply them at the right time!
1071 */
1072 intel_init_clock_gating(dev_priv);
1073
1074 ret = intel_gt_init(&dev_priv->gt);
1075 if (ret)
1076 goto err_unlock;
1077
1078 return 0;
1079
1080 /*
1081 * Unwinding is complicated by that we want to handle -EIO to mean
1082 * disable GPU submission but keep KMS alive. We want to mark the
1083 * HW as irreversibly wedged, but keep enough state around that the
1084 * driver doesn't explode during runtime.
1085 */
1086err_unlock:
1087 i915_gem_drain_workqueue(dev_priv);
1088
1089 if (ret != -EIO)
1090 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1091
1092 if (ret == -EIO) {
1093 /*
1094 * Allow engines or uC initialisation to fail by marking the GPU
1095 * as wedged. But we only want to do this when the GPU is angry,
1096 * for all other failures, such as an allocation failure, we bail.
1097 */
1098 if (!intel_gt_is_wedged(&dev_priv->gt)) {
1099 i915_probe_error(dev_priv,
1100 "Failed to initialize GPU, declaring it wedged!\n");
1101 intel_gt_set_wedged(&dev_priv->gt);
1102 }
1103
1104 /* Minimal basic recovery for KMS */
1105 ret = i915_ggtt_enable_hw(dev_priv);
1106 i915_ggtt_resume(&dev_priv->ggtt);
1107 intel_init_clock_gating(dev_priv);
1108 }
1109
1110 i915_gem_drain_freed_objects(dev_priv);
1111
1112 return ret;
1113}
1114
1115void i915_gem_driver_register(struct drm_i915_private *i915)
1116{
1117 i915_gem_driver_register__shrinker(i915);
1118
1119 intel_engines_driver_register(i915);
1120}
1121
1122void i915_gem_driver_unregister(struct drm_i915_private *i915)
1123{
1124 i915_gem_driver_unregister__shrinker(i915);
1125}
1126
1127void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
1128{
1129 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1130
1131 i915_gem_suspend_late(dev_priv);
1132 intel_gt_driver_remove(&dev_priv->gt);
1133 dev_priv->uabi_engines = RB_ROOT;
1134
1135 /* Flush any outstanding unpin_work. */
1136 i915_gem_drain_workqueue(dev_priv);
1137
1138 i915_gem_drain_freed_objects(dev_priv);
1139}
1140
1141void i915_gem_driver_release(struct drm_i915_private *dev_priv)
1142{
1143 intel_gt_driver_release(&dev_priv->gt);
1144
1145 intel_wa_list_free(&dev_priv->gt_wa_list);
1146
1147 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1148
1149 i915_gem_drain_freed_objects(dev_priv);
1150
1151 drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
1152}
1153
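/* Set up the global object bookkeeping: the obj lock, the deferred free
 * list and the shrinker's purge/shrink lists.
 */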
1154static void i915_gem_init__mm(struct drm_i915_private *i915)
1155{
1156 spin_lock_init(&i915->mm.obj_lock);
1157
1158 init_llist_head(&i915->mm.free_list);
1159
1160 INIT_LIST_HEAD(&i915->mm.purge_list);
1161 INIT_LIST_HEAD(&i915->mm.shrink_list);
1162
1163 i915_gem_init__objects(i915);
1164}
1165
1166void i915_gem_init_early(struct drm_i915_private *dev_priv)
1167{
1168 i915_gem_init__mm(dev_priv);
1169 i915_gem_init__contexts(dev_priv);
1170
1171 spin_lock_init(&dev_priv->fb_tracking.lock);
1172}
1173
1174void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1175{
1176 i915_gem_drain_freed_objects(dev_priv);
1177 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1178 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1179 drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
1180}
1181
1182int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1183{
1184 struct drm_i915_file_private *file_priv;
1185 int ret;
1186
1187 DRM_DEBUG("\n");
1188
1189 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1190 if (!file_priv)
1191 return -ENOMEM;
1192
1193 file->driver_priv = file_priv;
1194 file_priv->dev_priv = i915;
1195 file_priv->file = file;
1196
1197 file_priv->bsd_engine = -1;
1198 file_priv->hang_timestamp = jiffies;
1199
1200 ret = i915_gem_context_open(i915, file);
1201 if (ret)
1202 kfree(file_priv);
1203
1204 return ret;
1205}
1206
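/* Helpers for the ww (wait/wound) acquire context used to lock multiple GEM
 * objects without deadlocking: init/fini bracket a locking transaction and
 * backoff drops every held lock before relocking the contended object.
 */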
1207void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
1208{
1209 ww_acquire_init(&ww->ctx, &reservation_ww_class);
1210 INIT_LIST_HEAD(&ww->obj_list);
1211 ww->intr = intr;
1212 ww->contended = NULL;
1213}
1214
1215static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
1216{
1217 struct drm_i915_gem_object *obj;
1218
1219 while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
1220 list_del(&obj->obj_link);
1221 i915_gem_object_unlock(obj);
1222 }
1223}
1224
1225void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
1226{
1227 list_del(&obj->obj_link);
1228 i915_gem_object_unlock(obj);
1229}
1230
1231void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
1232{
1233 i915_gem_ww_ctx_unlock_all(ww);
1234 WARN_ON(ww->contended);
1235 ww_acquire_fini(&ww->ctx);
1236}
1237
1238int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
1239{
1240 int ret = 0;
1241
1242 if (WARN_ON(!ww->contended))
1243 return -EINVAL;
1244
1245 i915_gem_ww_ctx_unlock_all(ww);
1246 if (ww->intr)
1247 ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
1248 else
1249 dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
1250
1251 if (!ret)
1252 list_add_tail(&ww->contended->obj_link, &ww->obj_list);
1253
1254 ww->contended = NULL;
1255
1256 return ret;
1257}
1258
1259#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1260#include "selftests/mock_gem_device.c"
1261#include "selftests/i915_gem.c"
1262#endif
1/*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <linux/dma-fence-array.h>
29#include <linux/kthread.h>
30#include <linux/dma-resv.h>
31#include <linux/shmem_fs.h>
32#include <linux/slab.h>
33#include <linux/stop_machine.h>
34#include <linux/swap.h>
35#include <linux/pci.h>
36#include <linux/dma-buf.h>
37#include <linux/mman.h>
38
39#include <drm/drm_cache.h>
40#include <drm/drm_vma_manager.h>
41
42#include "gem/i915_gem_clflush.h"
43#include "gem/i915_gem_context.h"
44#include "gem/i915_gem_ioctls.h"
45#include "gem/i915_gem_mman.h"
46#include "gem/i915_gem_object_frontbuffer.h"
47#include "gem/i915_gem_pm.h"
48#include "gem/i915_gem_region.h"
49#include "gt/intel_engine_user.h"
50#include "gt/intel_gt.h"
51#include "gt/intel_gt_pm.h"
52#include "gt/intel_workarounds.h"
53
54#include "i915_drv.h"
55#include "i915_file_private.h"
56#include "i915_trace.h"
57#include "i915_vgpu.h"
58#include "intel_clock_gating.h"
59
60static int
61insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
62{
63 int err;
64
65 err = mutex_lock_interruptible(&ggtt->vm.mutex);
66 if (err)
67 return err;
68
69 memset(node, 0, sizeof(*node));
70 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
71 size, 0, I915_COLOR_UNEVICTABLE,
72 0, ggtt->mappable_end,
73 DRM_MM_INSERT_LOW);
74
75 mutex_unlock(&ggtt->vm.mutex);
76
77 return err;
78}
79
80static void
81remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
82{
83 mutex_lock(&ggtt->vm.mutex);
84 drm_mm_remove_node(node);
85 mutex_unlock(&ggtt->vm.mutex);
86}
87
88int
89i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
90 struct drm_file *file)
91{
92 struct drm_i915_private *i915 = to_i915(dev);
93 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
94 struct drm_i915_gem_get_aperture *args = data;
95 struct i915_vma *vma;
96 u64 pinned;
97
98 if (mutex_lock_interruptible(&ggtt->vm.mutex))
99 return -EINTR;
100
101 pinned = ggtt->vm.reserved;
102 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
103 if (i915_vma_is_pinned(vma))
104 pinned += vma->node.size;
105
106 mutex_unlock(&ggtt->vm.mutex);
107
108 args->aper_size = ggtt->vm.total;
109 args->aper_available_size = args->aper_size - pinned;
110
111 return 0;
112}
113
114int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
115 unsigned long flags)
116{
117 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
118 bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
119 LIST_HEAD(still_in_list);
120 intel_wakeref_t wakeref;
121 struct i915_vma *vma;
122 int ret;
123
124 assert_object_held(obj);
125
126 if (list_empty(&obj->vma.list))
127 return 0;
128
129 /*
130 * As some machines use ACPI to handle runtime-resume callbacks, and
131 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
132 * as they are required by the shrinker. Ergo, we wake the device up
133 * first just in case.
134 */
135 wakeref = intel_runtime_pm_get(rpm);
136
137try_again:
138 ret = 0;
139 spin_lock(&obj->vma.lock);
140 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
141 struct i915_vma,
142 obj_link))) {
143 list_move_tail(&vma->obj_link, &still_in_list);
144 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
145 continue;
146
147 if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
148 ret = -EBUSY;
149 break;
150 }
151
152 /*
153 * Requiring the vm destructor to take the object lock
154 * before destroying a vma would help us eliminate the
155 * i915_vm_tryget() here, AND thus also the barrier stuff
156 * at the end. That's an easy fix, but sleeping locks in
157 * a kthread should generally be avoided.
158 */
159 ret = -EAGAIN;
160 if (!i915_vm_tryget(vma->vm))
161 break;
162
163 spin_unlock(&obj->vma.lock);
164
165 /*
166 * Since i915_vma_parked() takes the object lock
167 * before vma destruction, it won't race us here,
168 * and destroy the vma from under us.
169 */
170
171 ret = -EBUSY;
172 if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
173 assert_object_held(vma->obj);
174 ret = i915_vma_unbind_async(vma, vm_trylock);
175 }
176
177 if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
178 !i915_vma_is_active(vma))) {
179 if (vm_trylock) {
180 if (mutex_trylock(&vma->vm->mutex)) {
181 ret = __i915_vma_unbind(vma);
182 mutex_unlock(&vma->vm->mutex);
183 }
184 } else {
185 ret = i915_vma_unbind(vma);
186 }
187 }
188
189 i915_vm_put(vma->vm);
190 spin_lock(&obj->vma.lock);
191 }
192 list_splice_init(&still_in_list, &obj->vma.list);
193 spin_unlock(&obj->vma.lock);
194
195 if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
196 rcu_barrier(); /* flush the i915_vm_release() */
197 goto try_again;
198 }
199
200 intel_runtime_pm_put(rpm, wakeref);
201
202 return ret;
203}
204
205static int
206shmem_pread(struct page *page, int offset, int len, char __user *user_data,
207 bool needs_clflush)
208{
209 char *vaddr;
210 int ret;
211
212 vaddr = kmap(page);
213
214 if (needs_clflush)
215 drm_clflush_virt_range(vaddr + offset, len);
216
217 ret = __copy_to_user(user_data, vaddr + offset, len);
218
219 kunmap(page);
220
221 return ret ? -EFAULT : 0;
222}
223
224static int
225i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
226 struct drm_i915_gem_pread *args)
227{
228 unsigned int needs_clflush;
229 char __user *user_data;
230 unsigned long offset;
231 pgoff_t idx;
232 u64 remain;
233 int ret;
234
235 ret = i915_gem_object_lock_interruptible(obj, NULL);
236 if (ret)
237 return ret;
238
239 ret = i915_gem_object_pin_pages(obj);
240 if (ret)
241 goto err_unlock;
242
243 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
244 if (ret)
245 goto err_unpin;
246
247 i915_gem_object_finish_access(obj);
248 i915_gem_object_unlock(obj);
249
250 remain = args->size;
251 user_data = u64_to_user_ptr(args->data_ptr);
252 offset = offset_in_page(args->offset);
253 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
254 struct page *page = i915_gem_object_get_page(obj, idx);
255 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
256
257 ret = shmem_pread(page, offset, length, user_data,
258 needs_clflush);
259 if (ret)
260 break;
261
262 remain -= length;
263 user_data += length;
264 offset = 0;
265 }
266
267 i915_gem_object_unpin_pages(obj);
268 return ret;
269
270err_unpin:
271 i915_gem_object_unpin_pages(obj);
272err_unlock:
273 i915_gem_object_unlock(obj);
274 return ret;
275}
276
277static inline bool
278gtt_user_read(struct io_mapping *mapping,
279 loff_t base, int offset,
280 char __user *user_data, int length)
281{
282 void __iomem *vaddr;
283 unsigned long unwritten;
284
285 /* We can use the cpu mem copy function because this is X86. */
286 vaddr = io_mapping_map_atomic_wc(mapping, base);
287 unwritten = __copy_to_user_inatomic(user_data,
288 (void __force *)vaddr + offset,
289 length);
290 io_mapping_unmap_atomic(vaddr);
291 if (unwritten) {
292 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
293 unwritten = copy_to_user(user_data,
294 (void __force *)vaddr + offset,
295 length);
296 io_mapping_unmap(vaddr);
297 }
298 return unwritten;
299}
300
301static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
302 struct drm_mm_node *node,
303 bool write)
304{
305 struct drm_i915_private *i915 = to_i915(obj->base.dev);
306 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
307 struct i915_vma *vma;
308 struct i915_gem_ww_ctx ww;
309 int ret;
310
311 i915_gem_ww_ctx_init(&ww, true);
312retry:
313 vma = ERR_PTR(-ENODEV);
314 ret = i915_gem_object_lock(obj, &ww);
315 if (ret)
316 goto err_ww;
317
318 ret = i915_gem_object_set_to_gtt_domain(obj, write);
319 if (ret)
320 goto err_ww;
321
322 if (!i915_gem_object_is_tiled(obj))
323 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
324 PIN_MAPPABLE |
325 PIN_NONBLOCK /* NOWARN */ |
326 PIN_NOEVICT);
327 if (vma == ERR_PTR(-EDEADLK)) {
328 ret = -EDEADLK;
329 goto err_ww;
330 } else if (!IS_ERR(vma)) {
331 node->start = i915_ggtt_offset(vma);
332 node->flags = 0;
333 } else {
334 ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
335 if (ret)
336 goto err_ww;
337 GEM_BUG_ON(!drm_mm_node_allocated(node));
338 vma = NULL;
339 }
340
341 ret = i915_gem_object_pin_pages(obj);
342 if (ret) {
343 if (drm_mm_node_allocated(node)) {
344 ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
345 remove_mappable_node(ggtt, node);
346 } else {
347 i915_vma_unpin(vma);
348 }
349 }
350
351err_ww:
352 if (ret == -EDEADLK) {
353 ret = i915_gem_ww_ctx_backoff(&ww);
354 if (!ret)
355 goto retry;
356 }
357 i915_gem_ww_ctx_fini(&ww);
358
359 return ret ? ERR_PTR(ret) : vma;
360}
361
362static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
363 struct drm_mm_node *node,
364 struct i915_vma *vma)
365{
366 struct drm_i915_private *i915 = to_i915(obj->base.dev);
367 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
368
369 i915_gem_object_unpin_pages(obj);
370 if (drm_mm_node_allocated(node)) {
371 ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
372 remove_mappable_node(ggtt, node);
373 } else {
374 i915_vma_unpin(vma);
375 }
376}
377
378static int
379i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
380 const struct drm_i915_gem_pread *args)
381{
382 struct drm_i915_private *i915 = to_i915(obj->base.dev);
383 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
384 unsigned long remain, offset;
385 intel_wakeref_t wakeref;
386 struct drm_mm_node node;
387 void __user *user_data;
388 struct i915_vma *vma;
389 int ret = 0;
390
391 if (overflows_type(args->size, remain) ||
392 overflows_type(args->offset, offset))
393 return -EINVAL;
394
395 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
396
397 vma = i915_gem_gtt_prepare(obj, &node, false);
398 if (IS_ERR(vma)) {
399 ret = PTR_ERR(vma);
400 goto out_rpm;
401 }
402
403 user_data = u64_to_user_ptr(args->data_ptr);
404 remain = args->size;
405 offset = args->offset;
406
407 while (remain > 0) {
408 /* Operation in this page
409 *
410 * page_base = page offset within aperture
411 * page_offset = offset within page
412 * page_length = bytes to copy for this page
413 */
414 u32 page_base = node.start;
415 unsigned page_offset = offset_in_page(offset);
416 unsigned page_length = PAGE_SIZE - page_offset;
417 page_length = remain < page_length ? remain : page_length;
418 if (drm_mm_node_allocated(&node)) {
419 ggtt->vm.insert_page(&ggtt->vm,
420 i915_gem_object_get_dma_address(obj,
421 offset >> PAGE_SHIFT),
422 node.start,
423 i915_gem_get_pat_index(i915,
424 I915_CACHE_NONE), 0);
425 } else {
426 page_base += offset & PAGE_MASK;
427 }
428
429 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
430 user_data, page_length)) {
431 ret = -EFAULT;
432 break;
433 }
434
435 remain -= page_length;
436 user_data += page_length;
437 offset += page_length;
438 }
439
440 i915_gem_gtt_cleanup(obj, &node, vma);
441out_rpm:
442 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
443 return ret;
444}
445
446/**
447 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
448 * @dev: drm device pointer
449 * @data: ioctl data blob
450 * @file: drm file pointer
451 *
452 * On error, the contents of *data are undefined.
453 */
454int
455i915_gem_pread_ioctl(struct drm_device *dev, void *data,
456 struct drm_file *file)
457{
458 struct drm_i915_private *i915 = to_i915(dev);
459 struct drm_i915_gem_pread *args = data;
460 struct drm_i915_gem_object *obj;
461 int ret;
462
463 /* PREAD is disallowed for all platforms after TGL-LP. This also
464 * covers all platforms with local memory.
465 */
466 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
467 return -EOPNOTSUPP;
468
469 if (args->size == 0)
470 return 0;
471
472 if (!access_ok(u64_to_user_ptr(args->data_ptr),
473 args->size))
474 return -EFAULT;
475
476 obj = i915_gem_object_lookup(file, args->handle);
477 if (!obj)
478 return -ENOENT;
479
480 /* Bounds check source. */
481 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
482 ret = -EINVAL;
483 goto out;
484 }
485
486 trace_i915_gem_object_pread(obj, args->offset, args->size);
487 ret = -ENODEV;
488 if (obj->ops->pread)
489 ret = obj->ops->pread(obj, args);
490 if (ret != -ENODEV)
491 goto out;
492
493 ret = i915_gem_object_wait(obj,
494 I915_WAIT_INTERRUPTIBLE,
495 MAX_SCHEDULE_TIMEOUT);
496 if (ret)
497 goto out;
498
499 ret = i915_gem_shmem_pread(obj, args);
500 if (ret == -EFAULT || ret == -ENODEV)
501 ret = i915_gem_gtt_pread(obj, args);
502
503out:
504 i915_gem_object_put(obj);
505 return ret;
506}
507
508/* This is the fast write path which cannot handle
509 * page faults in the source data
510 */
511
512static inline bool
513ggtt_write(struct io_mapping *mapping,
514 loff_t base, int offset,
515 char __user *user_data, int length)
516{
517 void __iomem *vaddr;
518 unsigned long unwritten;
519
520 /* We can use the cpu mem copy function because this is X86. */
521 vaddr = io_mapping_map_atomic_wc(mapping, base);
522 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
523 user_data, length);
524 io_mapping_unmap_atomic(vaddr);
525 if (unwritten) {
526 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
527 unwritten = copy_from_user((void __force *)vaddr + offset,
528 user_data, length);
529 io_mapping_unmap(vaddr);
530 }
531
532 return unwritten;
533}
534
535/**
536 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
537 * user into the GTT, uncached.
538 * @obj: i915 GEM object
539 * @args: pwrite arguments structure
540 */
541static int
542i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
543 const struct drm_i915_gem_pwrite *args)
544{
545 struct drm_i915_private *i915 = to_i915(obj->base.dev);
546 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
547 struct intel_runtime_pm *rpm = &i915->runtime_pm;
548 unsigned long remain, offset;
549 intel_wakeref_t wakeref;
550 struct drm_mm_node node;
551 struct i915_vma *vma;
552 void __user *user_data;
553 int ret = 0;
554
555 if (overflows_type(args->size, remain) ||
556 overflows_type(args->offset, offset))
557 return -EINVAL;
558
559 if (i915_gem_object_has_struct_page(obj)) {
560 /*
561 * Avoid waking the device up if we can fallback, as
562 * waking/resuming is very slow (worst-case 10-100 ms
563 * depending on PCI sleeps and our own resume time).
564 * This easily dwarfs any performance advantage from
565 * using the cache bypass of indirect GGTT access.
566 */
567 wakeref = intel_runtime_pm_get_if_in_use(rpm);
568 if (!wakeref)
569 return -EFAULT;
570 } else {
571 /* No backing pages, no fallback, we must force GGTT access */
572 wakeref = intel_runtime_pm_get(rpm);
573 }
574
575 vma = i915_gem_gtt_prepare(obj, &node, true);
576 if (IS_ERR(vma)) {
577 ret = PTR_ERR(vma);
578 goto out_rpm;
579 }
580
581 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
582
583 user_data = u64_to_user_ptr(args->data_ptr);
584 offset = args->offset;
585 remain = args->size;
586 while (remain) {
587 /* Operation in this page
588 *
589 * page_base = page offset within aperture
590 * page_offset = offset within page
591 * page_length = bytes to copy for this page
592 */
593 u32 page_base = node.start;
594 unsigned int page_offset = offset_in_page(offset);
595 unsigned int page_length = PAGE_SIZE - page_offset;
596 page_length = remain < page_length ? remain : page_length;
597 if (drm_mm_node_allocated(&node)) {
598 /* flush the write before we modify the GGTT */
599 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
600 ggtt->vm.insert_page(&ggtt->vm,
601 i915_gem_object_get_dma_address(obj,
602 offset >> PAGE_SHIFT),
603 node.start,
604 i915_gem_get_pat_index(i915,
605 I915_CACHE_NONE), 0);
606 wmb(); /* flush modifications to the GGTT (insert_page) */
607 } else {
608 page_base += offset & PAGE_MASK;
609 }
610 /* If we get a fault while copying data, then (presumably) our
611 * source page isn't available. Return the error and we'll
612 * retry in the slow path.
613 * If the object is non-shmem backed, we retry again with the
614 * path that handles page fault.
615 */
616 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
617 user_data, page_length)) {
618 ret = -EFAULT;
619 break;
620 }
621
622 remain -= page_length;
623 user_data += page_length;
624 offset += page_length;
625 }
626
627 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
628 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
629
630 i915_gem_gtt_cleanup(obj, &node, vma);
631out_rpm:
632 intel_runtime_pm_put(rpm, wakeref);
633 return ret;
634}
635
636/* Per-page copy function for the shmem pwrite fastpath.
637 * Flushes invalid cachelines before writing to the target if
638 * needs_clflush_before is set and flushes out any written cachelines after
639 * writing if needs_clflush is set.
640 */
641static int
642shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
643 bool needs_clflush_before,
644 bool needs_clflush_after)
645{
646 char *vaddr;
647 int ret;
648
649 vaddr = kmap(page);
650
651 if (needs_clflush_before)
652 drm_clflush_virt_range(vaddr + offset, len);
653
654 ret = __copy_from_user(vaddr + offset, user_data, len);
655 if (!ret && needs_clflush_after)
656 drm_clflush_virt_range(vaddr + offset, len);
657
658 kunmap(page);
659
660 return ret ? -EFAULT : 0;
661}
662
663static int
664i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
665 const struct drm_i915_gem_pwrite *args)
666{
667 unsigned int partial_cacheline_write;
668 unsigned int needs_clflush;
669 void __user *user_data;
670 unsigned long offset;
671 pgoff_t idx;
672 u64 remain;
673 int ret;
674
675 ret = i915_gem_object_lock_interruptible(obj, NULL);
676 if (ret)
677 return ret;
678
679 ret = i915_gem_object_pin_pages(obj);
680 if (ret)
681 goto err_unlock;
682
683 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
684 if (ret)
685 goto err_unpin;
686
687 i915_gem_object_finish_access(obj);
688 i915_gem_object_unlock(obj);
689
690 /* If we don't overwrite a cacheline completely we need to be
691 * careful to have up-to-date data by first clflushing. Don't
692 * overcomplicate things and flush the entire patch.
693 */
694 partial_cacheline_write = 0;
695 if (needs_clflush & CLFLUSH_BEFORE)
696 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
697
698 user_data = u64_to_user_ptr(args->data_ptr);
699 remain = args->size;
700 offset = offset_in_page(args->offset);
701 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
702 struct page *page = i915_gem_object_get_page(obj, idx);
703 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
704
705 ret = shmem_pwrite(page, offset, length, user_data,
706 (offset | length) & partial_cacheline_write,
707 needs_clflush & CLFLUSH_AFTER);
708 if (ret)
709 break;
710
711 remain -= length;
712 user_data += length;
713 offset = 0;
714 }
715
716 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
717
718 i915_gem_object_unpin_pages(obj);
719 return ret;
720
721err_unpin:
722 i915_gem_object_unpin_pages(obj);
723err_unlock:
724 i915_gem_object_unlock(obj);
725 return ret;
726}
727
728/**
729 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
730 * @dev: drm device
731 * @data: ioctl data blob
732 * @file: drm file
733 *
734 * On error, the contents of the buffer that were to be modified are undefined.
735 */
736int
737i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
738 struct drm_file *file)
739{
740 struct drm_i915_private *i915 = to_i915(dev);
741 struct drm_i915_gem_pwrite *args = data;
742 struct drm_i915_gem_object *obj;
743 int ret;
744
745 /* PWRITE is disallowed for all platforms after TGL-LP. This also
746 * covers all platforms with local memory.
747 */
748 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
749 return -EOPNOTSUPP;
750
751 if (args->size == 0)
752 return 0;
753
754 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
755 return -EFAULT;
756
757 obj = i915_gem_object_lookup(file, args->handle);
758 if (!obj)
759 return -ENOENT;
760
761 /* Bounds check destination. */
762 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
763 ret = -EINVAL;
764 goto err;
765 }
766
767 /* Writes not allowed into this read-only object */
768 if (i915_gem_object_is_readonly(obj)) {
769 ret = -EINVAL;
770 goto err;
771 }
772
773 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
774
	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}
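/*
 * Revoke userspace CPU mmaps of GGTT and local-memory objects before the
 * device powers down, and mark the fence registers dirty so that they are
 * restored when the user next wakes the device.
 */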
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	list_for_each_entry_safe(obj, on,
				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
		i915_gem_object_runtime_pm_release_mmap_offset(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}
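/*
 * Remove a vma from its object's vma tree so that the next
 * i915_vma_instance() lookup constructs a fresh vma rather than
 * reusing this one.
 */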
static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}
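/**
 * i915_gem_object_ggtt_pin_ww - pin an object into the global GTT
 * @obj: object to pin
 * @ww: ww acquire context under which @obj is locked
 * @view: portion/layout of the object to map, or NULL for the normal view
 * @size: minimum size of the mapping in bytes
 * @alignment: required alignment of the mapping
 * @flags: PIN_* placement controls
 *
 * Binds @obj into the global GTT, unbinding or discarding a misplaced vma
 * where the flags allow, and returns the pinned vma or an ERR_PTR. See
 * i915_gem_object_ggtt_pin() below for the ww lock-and-retry loop.
 */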
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_gtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	GEM_WARN_ON(!ww);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			/*
			 * If this misplaced vma is too big (i.e., at least
			 * half the size of the aperture) or hasn't been pinned
			 * mappable before, we ignore the misplacement when
			 * PIN_NONBLOCK is set in order to avoid the ping-pong
			 * issue described above. In other words, we try to
			 * avoid the costly operation of unbinding this vma
			 * from the GGTT and rebinding it back because there
			 * may not be enough space for this vma in the aperture.
			 */
			if (flags & PIN_MAPPABLE &&
			    (vma->fence_size > ggtt->mappable_end / 2 ||
			     !i915_vma_is_map_and_fenceable(vma)))
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}
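/*
 * Convenience wrapper that supplies its own ww context: takes the object
 * lock and retries i915_gem_object_ggtt_pin_ww() on -EDEADLK via
 * for_i915_gem_ww().
 */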
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_gtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}
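/**
 * i915_gem_madvise_ioctl - Marks the backing storage of the object
 * referenced by handle as DONTNEED or WILLNEED.
 * @dev: drm device
 * @data: ioctl data blob
 * @file_priv: drm file
 *
 * DONTNEED storage may be reaped under memory pressure; args->retained
 * reports back whether the backing storage has already been purged.
 */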
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

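	/*
	 * On platforms with the PIN_SWIZZLED_PAGES quirk, tiled objects
	 * keep their pages pinned (unshrinkable) while WILLNEED; move the
	 * per-object tiling quirk across so that shrinkability follows
	 * the new madvise state.
	 */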
	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid in that freeing the objects does
 * take a little time, during which the rcu callbacks could have added new
 * objects onto the freed list, and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		drain_workqueue(i915->bdev.wq);
		rcu_barrier();
	}
}

/*
 * Similar to objects above (see i915_gem_drain_freed_objects()), in general
 * we have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time
 * after waiting for the RCU grace period so that we catch work queued via
 * RCU from the first pass. As neither drain_workqueue() nor flush_workqueue()
 * report a result, we assume that no more than 3 passes are needed to catch
 * all _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < 3; i++) {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	}

	drain_workqueue(i915->wq);
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;
	int ret;

	/*
	 * In the process of replacing cache_level with pat_index a tricky
	 * dependency is created on the definition of enum i915_cache_level:
	 * if this enum is changed, the PTE encode would be broken. Warn
	 * about that here, and remove the check when we have completely
	 * quit using this enum.
	 */
	BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
		     I915_CACHE_LLC != 1 ||
		     I915_CACHE_L3_LLC != 2 ||
		     I915_CACHE_WT != 3 ||
		     I915_MAX_CACHE_LEVEL != 4);

	/* We need to fall back to 4K pages if the host doesn't support a huge GTT. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

	for_each_gt(gt, dev_priv, i) {
		intel_uc_fetch_firmwares(&gt->uc);
		intel_wopcm_init(&gt->wopcm);
		if (GRAPHICS_VER(dev_priv) >= 8)
			setup_private_pat(gt);
	}

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name, intel_clock_gating_init applies display clock
	 * gating workarounds, GT mmio workarounds and the occasional GT
	 * power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_clock_gating_init(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init(gt);
		if (ret)
			goto err_unlock;
	}

	/*
	 * Register engines early to ensure the engine list is in its final
	 * rb-tree form, lowering the amount of code that has to deal with
	 * the intermediate llist state.
	 */
	intel_engines_driver_register(dev_priv);

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want to handle -EIO
	 * to mean disable GPU submission but keep KMS alive. We want to mark
	 * the HW as irreversibly wedged, but keep enough state around that
	 * the driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO) {
		for_each_gt(gt, dev_priv, i) {
			intel_gt_driver_remove(gt);
			intel_gt_driver_release(gt);
			intel_uc_cleanup_firmwares(&gt->uc);
		}
	}

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the
		 * GPU as wedged. But we only want to do this when the GPU is
		 * angry; for all other failures, such as an allocation
		 * failure, we bail.
		 */
		for_each_gt(gt, dev_priv, i) {
			if (!intel_gt_is_wedged(gt)) {
				i915_probe_error(dev_priv,
						 "Failed to initialize GPU, declaring it wedged!\n");
				intel_gt_set_wedged(gt);
			}
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
		intel_clock_gating_init(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_suspend_late(dev_priv);
	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_remove(gt);
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i) {
		intel_gt_driver_release(gt);
		intel_uc_cleanup_firmwares(&gt->uc);
	}

	/* Flush any outstanding work, including i915_gem_context.release_work. */
	i915_gem_drain_workqueue(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_workqueue(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
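/*
 * Set up the per-client state for a new drm file: allocate the file
 * private, attach a drm client for accounting, and open the GEM context
 * state for this client.
 */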
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	struct i915_drm_client *client;
	int ret = -ENOMEM;

	drm_dbg(&i915->drm, "\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		goto err_alloc;

	client = i915_drm_client_alloc();
	if (!client)
		goto err_client;

	file->driver_priv = file_priv;
	file_priv->i915 = i915;
	file_priv->file = file;
	file_priv->client = client;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		goto err_context;

	return 0;

err_context:
	i915_drm_client_put(client);
err_client:
	kfree(file_priv);
err_alloc:
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif