Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include <linux/dma-map-ops.h>
8#include <linux/vmalloc.h>
9#include <linux/spinlock.h>
10#include <linux/shmem_fs.h>
11#include <linux/dma-buf.h>
12#include <linux/pfn_t.h>
13
14#include <drm/drm_prime.h>
15
16#include "msm_drv.h"
17#include "msm_fence.h"
18#include "msm_gem.h"
19#include "msm_gpu.h"
20#include "msm_mmu.h"
21
22static dma_addr_t physaddr(struct drm_gem_object *obj)
23{
24 struct msm_gem_object *msm_obj = to_msm_bo(obj);
25 struct msm_drm_private *priv = obj->dev->dev_private;
26 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
27 priv->vram.paddr;
28}
29
30static bool use_pages(struct drm_gem_object *obj)
31{
32 struct msm_gem_object *msm_obj = to_msm_bo(obj);
33 return !msm_obj->vram_node;
34}
35
36/*
37 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
38 * API. Really GPU cache is out of scope here (handled on cmdstream)
39 * and all we need to do is invalidate newly allocated pages before
40 * mapping to CPU as uncached/writecombine.
41 *
42 * On top of this, we have the added headache, that depending on
43 * display generation, the display's iommu may be wired up to either
44 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
45 * that here we either have dma-direct or iommu ops.
46 *
47 * Let this be a cautionary tail of abstraction gone wrong.
48 */
49
50static void sync_for_device(struct msm_gem_object *msm_obj)
51{
52 struct device *dev = msm_obj->base.dev->dev;
53
54 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
55}
56
57static void sync_for_cpu(struct msm_gem_object *msm_obj)
58{
59 struct device *dev = msm_obj->base.dev->dev;
60
61 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
62}
63
64static void update_lru_active(struct drm_gem_object *obj)
65{
66 struct msm_drm_private *priv = obj->dev->dev_private;
67 struct msm_gem_object *msm_obj = to_msm_bo(obj);
68
69 GEM_WARN_ON(!msm_obj->pages);
70
71 if (msm_obj->pin_count) {
72 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
73 } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
74 drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
75 } else {
76 GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
77
78 drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
79 }
80}
81
82static void update_lru_locked(struct drm_gem_object *obj)
83{
84 struct msm_drm_private *priv = obj->dev->dev_private;
85 struct msm_gem_object *msm_obj = to_msm_bo(obj);
86
87 msm_gem_assert_locked(&msm_obj->base);
88
89 if (!msm_obj->pages) {
90 GEM_WARN_ON(msm_obj->pin_count);
91
92 drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
93 } else {
94 update_lru_active(obj);
95 }
96}
97
98static void update_lru(struct drm_gem_object *obj)
99{
100 struct msm_drm_private *priv = obj->dev->dev_private;
101
102 mutex_lock(&priv->lru.lock);
103 update_lru_locked(obj);
104 mutex_unlock(&priv->lru.lock);
105}
106
107/* allocate pages from VRAM carveout, used when no IOMMU: */
108static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
109{
110 struct msm_gem_object *msm_obj = to_msm_bo(obj);
111 struct msm_drm_private *priv = obj->dev->dev_private;
112 dma_addr_t paddr;
113 struct page **p;
114 int ret, i;
115
116 p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
117 if (!p)
118 return ERR_PTR(-ENOMEM);
119
120 spin_lock(&priv->vram.lock);
121 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
122 spin_unlock(&priv->vram.lock);
123 if (ret) {
124 kvfree(p);
125 return ERR_PTR(ret);
126 }
127
128 paddr = physaddr(obj);
129 for (i = 0; i < npages; i++) {
130 p[i] = pfn_to_page(__phys_to_pfn(paddr));
131 paddr += PAGE_SIZE;
132 }
133
134 return p;
135}
136
137static struct page **get_pages(struct drm_gem_object *obj)
138{
139 struct msm_gem_object *msm_obj = to_msm_bo(obj);
140
141 msm_gem_assert_locked(obj);
142
143 if (!msm_obj->pages) {
144 struct drm_device *dev = obj->dev;
145 struct page **p;
146 int npages = obj->size >> PAGE_SHIFT;
147
148 if (use_pages(obj))
149 p = drm_gem_get_pages(obj);
150 else
151 p = get_pages_vram(obj, npages);
152
153 if (IS_ERR(p)) {
154 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
155 PTR_ERR(p));
156 return p;
157 }
158
159 msm_obj->pages = p;
160
161 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
162 if (IS_ERR(msm_obj->sgt)) {
163 void *ptr = ERR_CAST(msm_obj->sgt);
164
165 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
166 msm_obj->sgt = NULL;
167 return ptr;
168 }
169
170 /* For non-cached buffers, ensure the new pages are clean
171 * because display controller, GPU, etc. are not coherent:
172 */
173 if (msm_obj->flags & MSM_BO_WC)
174 sync_for_device(msm_obj);
175
176 update_lru(obj);
177 }
178
179 return msm_obj->pages;
180}
181
182static void put_pages_vram(struct drm_gem_object *obj)
183{
184 struct msm_gem_object *msm_obj = to_msm_bo(obj);
185 struct msm_drm_private *priv = obj->dev->dev_private;
186
187 spin_lock(&priv->vram.lock);
188 drm_mm_remove_node(msm_obj->vram_node);
189 spin_unlock(&priv->vram.lock);
190
191 kvfree(msm_obj->pages);
192}
193
194static void put_pages(struct drm_gem_object *obj)
195{
196 struct msm_gem_object *msm_obj = to_msm_bo(obj);
197
198 if (msm_obj->pages) {
199 if (msm_obj->sgt) {
200 /* For non-cached buffers, ensure the new
201 * pages are clean because display controller,
202 * GPU, etc. are not coherent:
203 */
204 if (msm_obj->flags & MSM_BO_WC)
205 sync_for_cpu(msm_obj);
206
207 sg_free_table(msm_obj->sgt);
208 kfree(msm_obj->sgt);
209 msm_obj->sgt = NULL;
210 }
211
212 if (use_pages(obj))
213 drm_gem_put_pages(obj, msm_obj->pages, true, false);
214 else
215 put_pages_vram(obj);
216
217 msm_obj->pages = NULL;
218 update_lru(obj);
219 }
220}
221
222static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
223 unsigned madv)
224{
225 struct msm_gem_object *msm_obj = to_msm_bo(obj);
226
227 msm_gem_assert_locked(obj);
228
229 if (msm_obj->madv > madv) {
230 DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
231 msm_obj->madv, madv);
232 return ERR_PTR(-EBUSY);
233 }
234
235 return get_pages(obj);
236}
237
238/*
239 * Update the pin count of the object, call under lru.lock
240 */
241void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
242{
243 struct msm_drm_private *priv = obj->dev->dev_private;
244
245 msm_gem_assert_locked(obj);
246
247 to_msm_bo(obj)->pin_count++;
248 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
249}
250
251static void pin_obj_locked(struct drm_gem_object *obj)
252{
253 struct msm_drm_private *priv = obj->dev->dev_private;
254
255 mutex_lock(&priv->lru.lock);
256 msm_gem_pin_obj_locked(obj);
257 mutex_unlock(&priv->lru.lock);
258}
259
260struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
261{
262 struct page **p;
263
264 msm_gem_lock(obj);
265 p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
266 if (!IS_ERR(p))
267 pin_obj_locked(obj);
268 msm_gem_unlock(obj);
269
270 return p;
271}
272
273void msm_gem_unpin_pages(struct drm_gem_object *obj)
274{
275 msm_gem_lock(obj);
276 msm_gem_unpin_locked(obj);
277 msm_gem_unlock(obj);
278}
279
280static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
281{
282 if (msm_obj->flags & MSM_BO_WC)
283 return pgprot_writecombine(prot);
284 return prot;
285}
286
287static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
288{
289 struct vm_area_struct *vma = vmf->vma;
290 struct drm_gem_object *obj = vma->vm_private_data;
291 struct msm_gem_object *msm_obj = to_msm_bo(obj);
292 struct page **pages;
293 unsigned long pfn;
294 pgoff_t pgoff;
295 int err;
296 vm_fault_t ret;
297
298 /*
299 * vm_ops.open/drm_gem_mmap_obj and close get and put
300 * a reference on obj. So, we dont need to hold one here.
301 */
302 err = msm_gem_lock_interruptible(obj);
303 if (err) {
304 ret = VM_FAULT_NOPAGE;
305 goto out;
306 }
307
308 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
309 msm_gem_unlock(obj);
310 return VM_FAULT_SIGBUS;
311 }
312
313 /* make sure we have pages attached now */
314 pages = get_pages(obj);
315 if (IS_ERR(pages)) {
316 ret = vmf_error(PTR_ERR(pages));
317 goto out_unlock;
318 }
319
320 /* We don't use vmf->pgoff since that has the fake offset: */
321 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
322
323 pfn = page_to_pfn(pages[pgoff]);
324
325 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
326 pfn, pfn << PAGE_SHIFT);
327
328 ret = vmf_insert_pfn(vma, vmf->address, pfn);
329
330out_unlock:
331 msm_gem_unlock(obj);
332out:
333 return ret;
334}
335
336/** get mmap offset */
337static uint64_t mmap_offset(struct drm_gem_object *obj)
338{
339 struct drm_device *dev = obj->dev;
340 int ret;
341
342 msm_gem_assert_locked(obj);
343
344 /* Make it mmapable */
345 ret = drm_gem_create_mmap_offset(obj);
346
347 if (ret) {
348 DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
349 return 0;
350 }
351
352 return drm_vma_node_offset_addr(&obj->vma_node);
353}
354
355uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
356{
357 uint64_t offset;
358
359 msm_gem_lock(obj);
360 offset = mmap_offset(obj);
361 msm_gem_unlock(obj);
362 return offset;
363}
364
365static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
366 struct msm_gem_address_space *aspace)
367{
368 struct msm_gem_object *msm_obj = to_msm_bo(obj);
369 struct msm_gem_vma *vma;
370
371 msm_gem_assert_locked(obj);
372
373 vma = msm_gem_vma_new(aspace);
374 if (!vma)
375 return ERR_PTR(-ENOMEM);
376
377 list_add_tail(&vma->list, &msm_obj->vmas);
378
379 return vma;
380}
381
382static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
383 struct msm_gem_address_space *aspace)
384{
385 struct msm_gem_object *msm_obj = to_msm_bo(obj);
386 struct msm_gem_vma *vma;
387
388 msm_gem_assert_locked(obj);
389
390 list_for_each_entry(vma, &msm_obj->vmas, list) {
391 if (vma->aspace == aspace)
392 return vma;
393 }
394
395 return NULL;
396}
397
398static void del_vma(struct msm_gem_vma *vma)
399{
400 if (!vma)
401 return;
402
403 list_del(&vma->list);
404 kfree(vma);
405}
406
407/*
408 * If close is true, this also closes the VMA (releasing the allocated
409 * iova range) in addition to removing the iommu mapping. In the eviction
410 * case (!close), we keep the iova allocated, but only remove the iommu
411 * mapping.
412 */
413static void
414put_iova_spaces(struct drm_gem_object *obj, bool close)
415{
416 struct msm_gem_object *msm_obj = to_msm_bo(obj);
417 struct msm_gem_vma *vma;
418
419 msm_gem_assert_locked(obj);
420
421 list_for_each_entry(vma, &msm_obj->vmas, list) {
422 if (vma->aspace) {
423 msm_gem_vma_purge(vma);
424 if (close)
425 msm_gem_vma_close(vma);
426 }
427 }
428}
429
430/* Called with msm_obj locked */
431static void
432put_iova_vmas(struct drm_gem_object *obj)
433{
434 struct msm_gem_object *msm_obj = to_msm_bo(obj);
435 struct msm_gem_vma *vma, *tmp;
436
437 msm_gem_assert_locked(obj);
438
439 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
440 del_vma(vma);
441 }
442}
443
444static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
445 struct msm_gem_address_space *aspace,
446 u64 range_start, u64 range_end)
447{
448 struct msm_gem_vma *vma;
449
450 msm_gem_assert_locked(obj);
451
452 vma = lookup_vma(obj, aspace);
453
454 if (!vma) {
455 int ret;
456
457 vma = add_vma(obj, aspace);
458 if (IS_ERR(vma))
459 return vma;
460
461 ret = msm_gem_vma_init(vma, obj->size,
462 range_start, range_end);
463 if (ret) {
464 del_vma(vma);
465 return ERR_PTR(ret);
466 }
467 } else {
468 GEM_WARN_ON(vma->iova < range_start);
469 GEM_WARN_ON((vma->iova + obj->size) > range_end);
470 }
471
472 return vma;
473}
474
475int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
476{
477 struct msm_gem_object *msm_obj = to_msm_bo(obj);
478 struct page **pages;
479 int prot = IOMMU_READ;
480
481 if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
482 prot |= IOMMU_WRITE;
483
484 if (msm_obj->flags & MSM_BO_MAP_PRIV)
485 prot |= IOMMU_PRIV;
486
487 if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
488 prot |= IOMMU_CACHE;
489
490 msm_gem_assert_locked(obj);
491
492 pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
493 if (IS_ERR(pages))
494 return PTR_ERR(pages);
495
496 return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
497}
498
499void msm_gem_unpin_locked(struct drm_gem_object *obj)
500{
501 struct msm_drm_private *priv = obj->dev->dev_private;
502 struct msm_gem_object *msm_obj = to_msm_bo(obj);
503
504 msm_gem_assert_locked(obj);
505
506 mutex_lock(&priv->lru.lock);
507 msm_obj->pin_count--;
508 GEM_WARN_ON(msm_obj->pin_count < 0);
509 update_lru_locked(obj);
510 mutex_unlock(&priv->lru.lock);
511}
512
513/* Special unpin path for use in fence-signaling path, avoiding the need
514 * to hold the obj lock by only depending on things that a protected by
515 * the LRU lock. In particular we know that that we already have backing
516 * and and that the object's dma_resv has the fence for the current
517 * submit/job which will prevent us racing against page eviction.
518 */
519void msm_gem_unpin_active(struct drm_gem_object *obj)
520{
521 struct msm_gem_object *msm_obj = to_msm_bo(obj);
522
523 msm_obj->pin_count--;
524 GEM_WARN_ON(msm_obj->pin_count < 0);
525 update_lru_active(obj);
526}
527
528struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
529 struct msm_gem_address_space *aspace)
530{
531 return get_vma_locked(obj, aspace, 0, U64_MAX);
532}
533
534static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
535 struct msm_gem_address_space *aspace, uint64_t *iova,
536 u64 range_start, u64 range_end)
537{
538 struct msm_gem_vma *vma;
539 int ret;
540
541 msm_gem_assert_locked(obj);
542
543 vma = get_vma_locked(obj, aspace, range_start, range_end);
544 if (IS_ERR(vma))
545 return PTR_ERR(vma);
546
547 ret = msm_gem_pin_vma_locked(obj, vma);
548 if (!ret) {
549 *iova = vma->iova;
550 pin_obj_locked(obj);
551 }
552
553 return ret;
554}
555
556/*
557 * get iova and pin it. Should have a matching put
558 * limits iova to specified range (in pages)
559 */
560int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
561 struct msm_gem_address_space *aspace, uint64_t *iova,
562 u64 range_start, u64 range_end)
563{
564 int ret;
565
566 msm_gem_lock(obj);
567 ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
568 msm_gem_unlock(obj);
569
570 return ret;
571}
572
573/* get iova and pin it. Should have a matching put */
574int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
575 struct msm_gem_address_space *aspace, uint64_t *iova)
576{
577 return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
578}
579
580/*
581 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
582 * valid for the life of the object
583 */
584int msm_gem_get_iova(struct drm_gem_object *obj,
585 struct msm_gem_address_space *aspace, uint64_t *iova)
586{
587 struct msm_gem_vma *vma;
588 int ret = 0;
589
590 msm_gem_lock(obj);
591 vma = get_vma_locked(obj, aspace, 0, U64_MAX);
592 if (IS_ERR(vma)) {
593 ret = PTR_ERR(vma);
594 } else {
595 *iova = vma->iova;
596 }
597 msm_gem_unlock(obj);
598
599 return ret;
600}
601
602static int clear_iova(struct drm_gem_object *obj,
603 struct msm_gem_address_space *aspace)
604{
605 struct msm_gem_vma *vma = lookup_vma(obj, aspace);
606
607 if (!vma)
608 return 0;
609
610 msm_gem_vma_purge(vma);
611 msm_gem_vma_close(vma);
612 del_vma(vma);
613
614 return 0;
615}
616
617/*
618 * Get the requested iova but don't pin it. Fails if the requested iova is
619 * not available. Doesn't need a put because iovas are currently valid for
620 * the life of the object.
621 *
622 * Setting an iova of zero will clear the vma.
623 */
624int msm_gem_set_iova(struct drm_gem_object *obj,
625 struct msm_gem_address_space *aspace, uint64_t iova)
626{
627 int ret = 0;
628
629 msm_gem_lock(obj);
630 if (!iova) {
631 ret = clear_iova(obj, aspace);
632 } else {
633 struct msm_gem_vma *vma;
634 vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
635 if (IS_ERR(vma)) {
636 ret = PTR_ERR(vma);
637 } else if (GEM_WARN_ON(vma->iova != iova)) {
638 clear_iova(obj, aspace);
639 ret = -EBUSY;
640 }
641 }
642 msm_gem_unlock(obj);
643
644 return ret;
645}
646
647/*
648 * Unpin a iova by updating the reference counts. The memory isn't actually
649 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
650 * to get rid of it
651 */
652void msm_gem_unpin_iova(struct drm_gem_object *obj,
653 struct msm_gem_address_space *aspace)
654{
655 struct msm_gem_vma *vma;
656
657 msm_gem_lock(obj);
658 vma = lookup_vma(obj, aspace);
659 if (!GEM_WARN_ON(!vma)) {
660 msm_gem_unpin_locked(obj);
661 }
662 msm_gem_unlock(obj);
663}
664
665int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
666 struct drm_mode_create_dumb *args)
667{
668 args->pitch = align_pitch(args->width, args->bpp);
669 args->size = PAGE_ALIGN(args->pitch * args->height);
670 return msm_gem_new_handle(dev, file, args->size,
671 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
672}
673
674int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
675 uint32_t handle, uint64_t *offset)
676{
677 struct drm_gem_object *obj;
678 int ret = 0;
679
680 /* GEM does all our handle to object mapping */
681 obj = drm_gem_object_lookup(file, handle);
682 if (obj == NULL) {
683 ret = -ENOENT;
684 goto fail;
685 }
686
687 *offset = msm_gem_mmap_offset(obj);
688
689 drm_gem_object_put(obj);
690
691fail:
692 return ret;
693}
694
695static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
696{
697 struct msm_gem_object *msm_obj = to_msm_bo(obj);
698 struct page **pages;
699 int ret = 0;
700
701 msm_gem_assert_locked(obj);
702
703 if (obj->import_attach)
704 return ERR_PTR(-ENODEV);
705
706 pages = msm_gem_pin_pages_locked(obj, madv);
707 if (IS_ERR(pages))
708 return ERR_CAST(pages);
709
710 pin_obj_locked(obj);
711
712 /* increment vmap_count *before* vmap() call, so shrinker can
713 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
714 * This guarantees that we won't try to msm_gem_vunmap() this
715 * same object from within the vmap() call (while we already
716 * hold msm_obj lock)
717 */
718 msm_obj->vmap_count++;
719
720 if (!msm_obj->vaddr) {
721 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
722 VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
723 if (msm_obj->vaddr == NULL) {
724 ret = -ENOMEM;
725 goto fail;
726 }
727 }
728
729 return msm_obj->vaddr;
730
731fail:
732 msm_obj->vmap_count--;
733 msm_gem_unpin_locked(obj);
734 return ERR_PTR(ret);
735}
736
737void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
738{
739 return get_vaddr(obj, MSM_MADV_WILLNEED);
740}
741
742void *msm_gem_get_vaddr(struct drm_gem_object *obj)
743{
744 void *ret;
745
746 msm_gem_lock(obj);
747 ret = msm_gem_get_vaddr_locked(obj);
748 msm_gem_unlock(obj);
749
750 return ret;
751}
752
753/*
754 * Don't use this! It is for the very special case of dumping
755 * submits from GPU hangs or faults, were the bo may already
756 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
757 * active list.
758 */
759void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
760{
761 return get_vaddr(obj, __MSM_MADV_PURGED);
762}
763
764void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
765{
766 struct msm_gem_object *msm_obj = to_msm_bo(obj);
767
768 msm_gem_assert_locked(obj);
769 GEM_WARN_ON(msm_obj->vmap_count < 1);
770
771 msm_obj->vmap_count--;
772 msm_gem_unpin_locked(obj);
773}
774
775void msm_gem_put_vaddr(struct drm_gem_object *obj)
776{
777 msm_gem_lock(obj);
778 msm_gem_put_vaddr_locked(obj);
779 msm_gem_unlock(obj);
780}
781
782/* Update madvise status, returns true if not purged, else
783 * false or -errno.
784 */
785int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
786{
787 struct msm_drm_private *priv = obj->dev->dev_private;
788 struct msm_gem_object *msm_obj = to_msm_bo(obj);
789
790 msm_gem_lock(obj);
791
792 mutex_lock(&priv->lru.lock);
793
794 if (msm_obj->madv != __MSM_MADV_PURGED)
795 msm_obj->madv = madv;
796
797 madv = msm_obj->madv;
798
799 /* If the obj is inactive, we might need to move it
800 * between inactive lists
801 */
802 update_lru_locked(obj);
803
804 mutex_unlock(&priv->lru.lock);
805
806 msm_gem_unlock(obj);
807
808 return (madv != __MSM_MADV_PURGED);
809}
810
811void msm_gem_purge(struct drm_gem_object *obj)
812{
813 struct drm_device *dev = obj->dev;
814 struct msm_drm_private *priv = obj->dev->dev_private;
815 struct msm_gem_object *msm_obj = to_msm_bo(obj);
816
817 msm_gem_assert_locked(obj);
818 GEM_WARN_ON(!is_purgeable(msm_obj));
819
820 /* Get rid of any iommu mapping(s): */
821 put_iova_spaces(obj, true);
822
823 msm_gem_vunmap(obj);
824
825 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
826
827 put_pages(obj);
828
829 put_iova_vmas(obj);
830
831 mutex_lock(&priv->lru.lock);
832 /* A one-way transition: */
833 msm_obj->madv = __MSM_MADV_PURGED;
834 mutex_unlock(&priv->lru.lock);
835
836 drm_gem_free_mmap_offset(obj);
837
838 /* Our goal here is to return as much of the memory as
839 * is possible back to the system as we are called from OOM.
840 * To do this we must instruct the shmfs to drop all of its
841 * backing pages, *now*.
842 */
843 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
844
845 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
846 0, (loff_t)-1);
847}
848
849/*
850 * Unpin the backing pages and make them available to be swapped out.
851 */
852void msm_gem_evict(struct drm_gem_object *obj)
853{
854 struct drm_device *dev = obj->dev;
855 struct msm_gem_object *msm_obj = to_msm_bo(obj);
856
857 msm_gem_assert_locked(obj);
858 GEM_WARN_ON(is_unevictable(msm_obj));
859
860 /* Get rid of any iommu mapping(s): */
861 put_iova_spaces(obj, false);
862
863 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
864
865 put_pages(obj);
866}
867
868void msm_gem_vunmap(struct drm_gem_object *obj)
869{
870 struct msm_gem_object *msm_obj = to_msm_bo(obj);
871
872 msm_gem_assert_locked(obj);
873
874 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
875 return;
876
877 vunmap(msm_obj->vaddr);
878 msm_obj->vaddr = NULL;
879}
880
881bool msm_gem_active(struct drm_gem_object *obj)
882{
883 msm_gem_assert_locked(obj);
884
885 if (to_msm_bo(obj)->pin_count)
886 return true;
887
888 return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
889}
890
891int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
892{
893 bool write = !!(op & MSM_PREP_WRITE);
894 unsigned long remain =
895 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
896 long ret;
897
898 if (op & MSM_PREP_BOOST) {
899 dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
900 ktime_get());
901 }
902
903 ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
904 true, remain);
905 if (ret == 0)
906 return remain == 0 ? -EBUSY : -ETIMEDOUT;
907 else if (ret < 0)
908 return ret;
909
910 /* TODO cache maintenance */
911
912 return 0;
913}
914
915int msm_gem_cpu_fini(struct drm_gem_object *obj)
916{
917 /* TODO cache maintenance */
918 return 0;
919}
920
921#ifdef CONFIG_DEBUG_FS
922void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
923 struct msm_gem_stats *stats)
924{
925 struct msm_gem_object *msm_obj = to_msm_bo(obj);
926 struct dma_resv *robj = obj->resv;
927 struct msm_gem_vma *vma;
928 uint64_t off = drm_vma_node_start(&obj->vma_node);
929 const char *madv;
930
931 msm_gem_lock(obj);
932
933 stats->all.count++;
934 stats->all.size += obj->size;
935
936 if (msm_gem_active(obj)) {
937 stats->active.count++;
938 stats->active.size += obj->size;
939 }
940
941 if (msm_obj->pages) {
942 stats->resident.count++;
943 stats->resident.size += obj->size;
944 }
945
946 switch (msm_obj->madv) {
947 case __MSM_MADV_PURGED:
948 stats->purged.count++;
949 stats->purged.size += obj->size;
950 madv = " purged";
951 break;
952 case MSM_MADV_DONTNEED:
953 stats->purgeable.count++;
954 stats->purgeable.size += obj->size;
955 madv = " purgeable";
956 break;
957 case MSM_MADV_WILLNEED:
958 default:
959 madv = "";
960 break;
961 }
962
963 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
964 msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
965 obj->name, kref_read(&obj->refcount),
966 off, msm_obj->vaddr);
967
968 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
969
970 if (!list_empty(&msm_obj->vmas)) {
971
972 seq_puts(m, " vmas:");
973
974 list_for_each_entry(vma, &msm_obj->vmas, list) {
975 const char *name, *comm;
976 if (vma->aspace) {
977 struct msm_gem_address_space *aspace = vma->aspace;
978 struct task_struct *task =
979 get_pid_task(aspace->pid, PIDTYPE_PID);
980 if (task) {
981 comm = kstrdup(task->comm, GFP_KERNEL);
982 put_task_struct(task);
983 } else {
984 comm = NULL;
985 }
986 name = aspace->name;
987 } else {
988 name = comm = NULL;
989 }
990 seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
991 name, comm ? ":" : "", comm ? comm : "",
992 vma->aspace, vma->iova,
993 vma->mapped ? "mapped" : "unmapped");
994 kfree(comm);
995 }
996
997 seq_puts(m, "\n");
998 }
999
1000 dma_resv_describe(robj, m);
1001 msm_gem_unlock(obj);
1002}
1003
1004void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1005{
1006 struct msm_gem_stats stats = {};
1007 struct msm_gem_object *msm_obj;
1008
1009 seq_puts(m, " flags id ref offset kaddr size madv name\n");
1010 list_for_each_entry(msm_obj, list, node) {
1011 struct drm_gem_object *obj = &msm_obj->base;
1012 seq_puts(m, " ");
1013 msm_gem_describe(obj, m, &stats);
1014 }
1015
1016 seq_printf(m, "Total: %4d objects, %9zu bytes\n",
1017 stats.all.count, stats.all.size);
1018 seq_printf(m, "Active: %4d objects, %9zu bytes\n",
1019 stats.active.count, stats.active.size);
1020 seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
1021 stats.resident.count, stats.resident.size);
1022 seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1023 stats.purgeable.count, stats.purgeable.size);
1024 seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
1025 stats.purged.count, stats.purged.size);
1026}
1027#endif
1028
1029/* don't call directly! Use drm_gem_object_put() */
1030static void msm_gem_free_object(struct drm_gem_object *obj)
1031{
1032 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1033 struct drm_device *dev = obj->dev;
1034 struct msm_drm_private *priv = dev->dev_private;
1035
1036 mutex_lock(&priv->obj_lock);
1037 list_del(&msm_obj->node);
1038 mutex_unlock(&priv->obj_lock);
1039
1040 put_iova_spaces(obj, true);
1041
1042 if (obj->import_attach) {
1043 GEM_WARN_ON(msm_obj->vaddr);
1044
1045 /* Don't drop the pages for imported dmabuf, as they are not
1046 * ours, just free the array we allocated:
1047 */
1048 kvfree(msm_obj->pages);
1049
1050 put_iova_vmas(obj);
1051
1052 drm_prime_gem_destroy(obj, msm_obj->sgt);
1053 } else {
1054 msm_gem_vunmap(obj);
1055 put_pages(obj);
1056 put_iova_vmas(obj);
1057 }
1058
1059 drm_gem_object_release(obj);
1060
1061 kfree(msm_obj->metadata);
1062 kfree(msm_obj);
1063}
1064
1065static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1066{
1067 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1068
1069 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1070 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1071
1072 return 0;
1073}
1074
1075/* convenience method to construct a GEM buffer object, and userspace handle */
1076int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1077 uint32_t size, uint32_t flags, uint32_t *handle,
1078 char *name)
1079{
1080 struct drm_gem_object *obj;
1081 int ret;
1082
1083 obj = msm_gem_new(dev, size, flags);
1084
1085 if (IS_ERR(obj))
1086 return PTR_ERR(obj);
1087
1088 if (name)
1089 msm_gem_object_set_name(obj, "%s", name);
1090
1091 ret = drm_gem_handle_create(file, obj, handle);
1092
1093 /* drop reference from allocate - handle holds it now */
1094 drm_gem_object_put(obj);
1095
1096 return ret;
1097}
1098
1099static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1100{
1101 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1102 enum drm_gem_object_status status = 0;
1103
1104 if (msm_obj->pages)
1105 status |= DRM_GEM_OBJECT_RESIDENT;
1106
1107 if (msm_obj->madv == MSM_MADV_DONTNEED)
1108 status |= DRM_GEM_OBJECT_PURGEABLE;
1109
1110 return status;
1111}
1112
1113static const struct vm_operations_struct vm_ops = {
1114 .fault = msm_gem_fault,
1115 .open = drm_gem_vm_open,
1116 .close = drm_gem_vm_close,
1117};
1118
1119static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1120 .free = msm_gem_free_object,
1121 .pin = msm_gem_prime_pin,
1122 .unpin = msm_gem_prime_unpin,
1123 .get_sg_table = msm_gem_prime_get_sg_table,
1124 .vmap = msm_gem_prime_vmap,
1125 .vunmap = msm_gem_prime_vunmap,
1126 .mmap = msm_gem_object_mmap,
1127 .status = msm_gem_status,
1128 .vm_ops = &vm_ops,
1129};
1130
1131static int msm_gem_new_impl(struct drm_device *dev,
1132 uint32_t size, uint32_t flags,
1133 struct drm_gem_object **obj)
1134{
1135 struct msm_drm_private *priv = dev->dev_private;
1136 struct msm_gem_object *msm_obj;
1137
1138 switch (flags & MSM_BO_CACHE_MASK) {
1139 case MSM_BO_CACHED:
1140 case MSM_BO_WC:
1141 break;
1142 case MSM_BO_CACHED_COHERENT:
1143 if (priv->has_cached_coherent)
1144 break;
1145 fallthrough;
1146 default:
1147 DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1148 (flags & MSM_BO_CACHE_MASK));
1149 return -EINVAL;
1150 }
1151
1152 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1153 if (!msm_obj)
1154 return -ENOMEM;
1155
1156 msm_obj->flags = flags;
1157 msm_obj->madv = MSM_MADV_WILLNEED;
1158
1159 INIT_LIST_HEAD(&msm_obj->node);
1160 INIT_LIST_HEAD(&msm_obj->vmas);
1161
1162 *obj = &msm_obj->base;
1163 (*obj)->funcs = &msm_gem_object_funcs;
1164
1165 return 0;
1166}
1167
1168struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1169{
1170 struct msm_drm_private *priv = dev->dev_private;
1171 struct msm_gem_object *msm_obj;
1172 struct drm_gem_object *obj = NULL;
1173 bool use_vram = false;
1174 int ret;
1175
1176 size = PAGE_ALIGN(size);
1177
1178 if (!msm_use_mmu(dev))
1179 use_vram = true;
1180 else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1181 use_vram = true;
1182
1183 if (GEM_WARN_ON(use_vram && !priv->vram.size))
1184 return ERR_PTR(-EINVAL);
1185
1186 /* Disallow zero sized objects as they make the underlying
1187 * infrastructure grumpy
1188 */
1189 if (size == 0)
1190 return ERR_PTR(-EINVAL);
1191
1192 ret = msm_gem_new_impl(dev, size, flags, &obj);
1193 if (ret)
1194 return ERR_PTR(ret);
1195
1196 msm_obj = to_msm_bo(obj);
1197
1198 if (use_vram) {
1199 struct msm_gem_vma *vma;
1200 struct page **pages;
1201
1202 drm_gem_private_object_init(dev, obj, size);
1203
1204 msm_gem_lock(obj);
1205
1206 vma = add_vma(obj, NULL);
1207 msm_gem_unlock(obj);
1208 if (IS_ERR(vma)) {
1209 ret = PTR_ERR(vma);
1210 goto fail;
1211 }
1212
1213 to_msm_bo(obj)->vram_node = &vma->node;
1214
1215 msm_gem_lock(obj);
1216 pages = get_pages(obj);
1217 msm_gem_unlock(obj);
1218 if (IS_ERR(pages)) {
1219 ret = PTR_ERR(pages);
1220 goto fail;
1221 }
1222
1223 vma->iova = physaddr(obj);
1224 } else {
1225 ret = drm_gem_object_init(dev, obj, size);
1226 if (ret)
1227 goto fail;
1228 /*
1229 * Our buffers are kept pinned, so allocating them from the
1230 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1231 * See comments above new_inode() why this is required _and_
1232 * expected if you're going to pin these pages.
1233 */
1234 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1235 }
1236
1237 drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1238
1239 mutex_lock(&priv->obj_lock);
1240 list_add_tail(&msm_obj->node, &priv->objects);
1241 mutex_unlock(&priv->obj_lock);
1242
1243 ret = drm_gem_create_mmap_offset(obj);
1244 if (ret)
1245 goto fail;
1246
1247 return obj;
1248
1249fail:
1250 drm_gem_object_put(obj);
1251 return ERR_PTR(ret);
1252}
1253
1254struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1255 struct dma_buf *dmabuf, struct sg_table *sgt)
1256{
1257 struct msm_drm_private *priv = dev->dev_private;
1258 struct msm_gem_object *msm_obj;
1259 struct drm_gem_object *obj;
1260 uint32_t size;
1261 int ret, npages;
1262
1263 /* if we don't have IOMMU, don't bother pretending we can import: */
1264 if (!msm_use_mmu(dev)) {
1265 DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1266 return ERR_PTR(-EINVAL);
1267 }
1268
1269 size = PAGE_ALIGN(dmabuf->size);
1270
1271 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1272 if (ret)
1273 return ERR_PTR(ret);
1274
1275 drm_gem_private_object_init(dev, obj, size);
1276
1277 npages = size / PAGE_SIZE;
1278
1279 msm_obj = to_msm_bo(obj);
1280 msm_gem_lock(obj);
1281 msm_obj->sgt = sgt;
1282 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1283 if (!msm_obj->pages) {
1284 msm_gem_unlock(obj);
1285 ret = -ENOMEM;
1286 goto fail;
1287 }
1288
1289 ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1290 if (ret) {
1291 msm_gem_unlock(obj);
1292 goto fail;
1293 }
1294
1295 msm_gem_unlock(obj);
1296
1297 drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1298
1299 mutex_lock(&priv->obj_lock);
1300 list_add_tail(&msm_obj->node, &priv->objects);
1301 mutex_unlock(&priv->obj_lock);
1302
1303 ret = drm_gem_create_mmap_offset(obj);
1304 if (ret)
1305 goto fail;
1306
1307 return obj;
1308
1309fail:
1310 drm_gem_object_put(obj);
1311 return ERR_PTR(ret);
1312}
1313
1314void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1315 uint32_t flags, struct msm_gem_address_space *aspace,
1316 struct drm_gem_object **bo, uint64_t *iova)
1317{
1318 void *vaddr;
1319 struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1320 int ret;
1321
1322 if (IS_ERR(obj))
1323 return ERR_CAST(obj);
1324
1325 if (iova) {
1326 ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1327 if (ret)
1328 goto err;
1329 }
1330
1331 vaddr = msm_gem_get_vaddr(obj);
1332 if (IS_ERR(vaddr)) {
1333 msm_gem_unpin_iova(obj, aspace);
1334 ret = PTR_ERR(vaddr);
1335 goto err;
1336 }
1337
1338 if (bo)
1339 *bo = obj;
1340
1341 return vaddr;
1342err:
1343 drm_gem_object_put(obj);
1344
1345 return ERR_PTR(ret);
1346
1347}
1348
1349void msm_gem_kernel_put(struct drm_gem_object *bo,
1350 struct msm_gem_address_space *aspace)
1351{
1352 if (IS_ERR_OR_NULL(bo))
1353 return;
1354
1355 msm_gem_put_vaddr(bo);
1356 msm_gem_unpin_iova(bo, aspace);
1357 drm_gem_object_put(bo);
1358}
1359
1360void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1361{
1362 struct msm_gem_object *msm_obj = to_msm_bo(bo);
1363 va_list ap;
1364
1365 if (!fmt)
1366 return;
1367
1368 va_start(ap, fmt);
1369 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1370 va_end(ap);
1371}
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/spinlock.h>
19#include <linux/shmem_fs.h>
20#include <linux/dma-buf.h>
21#include <linux/pfn_t.h>
22
23#include "msm_drv.h"
24#include "msm_fence.h"
25#include "msm_gem.h"
26#include "msm_gpu.h"
27#include "msm_mmu.h"
28
29static dma_addr_t physaddr(struct drm_gem_object *obj)
30{
31 struct msm_gem_object *msm_obj = to_msm_bo(obj);
32 struct msm_drm_private *priv = obj->dev->dev_private;
33 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
34 priv->vram.paddr;
35}
36
37static bool use_pages(struct drm_gem_object *obj)
38{
39 struct msm_gem_object *msm_obj = to_msm_bo(obj);
40 return !msm_obj->vram_node;
41}
42
43/* allocate pages from VRAM carveout, used when no IOMMU: */
44static struct page **get_pages_vram(struct drm_gem_object *obj,
45 int npages)
46{
47 struct msm_gem_object *msm_obj = to_msm_bo(obj);
48 struct msm_drm_private *priv = obj->dev->dev_private;
49 dma_addr_t paddr;
50 struct page **p;
51 int ret, i;
52
53 p = drm_malloc_ab(npages, sizeof(struct page *));
54 if (!p)
55 return ERR_PTR(-ENOMEM);
56
57 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
58 npages, 0, DRM_MM_SEARCH_DEFAULT);
59 if (ret) {
60 drm_free_large(p);
61 return ERR_PTR(ret);
62 }
63
64 paddr = physaddr(obj);
65 for (i = 0; i < npages; i++) {
66 p[i] = phys_to_page(paddr);
67 paddr += PAGE_SIZE;
68 }
69
70 return p;
71}
72
73/* called with dev->struct_mutex held */
74static struct page **get_pages(struct drm_gem_object *obj)
75{
76 struct msm_gem_object *msm_obj = to_msm_bo(obj);
77
78 if (!msm_obj->pages) {
79 struct drm_device *dev = obj->dev;
80 struct page **p;
81 int npages = obj->size >> PAGE_SHIFT;
82
83 if (use_pages(obj))
84 p = drm_gem_get_pages(obj);
85 else
86 p = get_pages_vram(obj, npages);
87
88 if (IS_ERR(p)) {
89 dev_err(dev->dev, "could not get pages: %ld\n",
90 PTR_ERR(p));
91 return p;
92 }
93
94 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
95 if (IS_ERR(msm_obj->sgt)) {
96 dev_err(dev->dev, "failed to allocate sgt\n");
97 return ERR_CAST(msm_obj->sgt);
98 }
99
100 msm_obj->pages = p;
101
102 /* For non-cached buffers, ensure the new pages are clean
103 * because display controller, GPU, etc. are not coherent:
104 */
105 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
106 dma_map_sg(dev->dev, msm_obj->sgt->sgl,
107 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
108 }
109
110 return msm_obj->pages;
111}
112
113static void put_pages(struct drm_gem_object *obj)
114{
115 struct msm_gem_object *msm_obj = to_msm_bo(obj);
116
117 if (msm_obj->pages) {
118 /* For non-cached buffers, ensure the new pages are clean
119 * because display controller, GPU, etc. are not coherent:
120 */
121 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
122 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
123 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
124 sg_free_table(msm_obj->sgt);
125 kfree(msm_obj->sgt);
126
127 if (use_pages(obj))
128 drm_gem_put_pages(obj, msm_obj->pages, true, false);
129 else {
130 drm_mm_remove_node(msm_obj->vram_node);
131 drm_free_large(msm_obj->pages);
132 }
133
134 msm_obj->pages = NULL;
135 }
136}
137
138struct page **msm_gem_get_pages(struct drm_gem_object *obj)
139{
140 struct drm_device *dev = obj->dev;
141 struct page **p;
142 mutex_lock(&dev->struct_mutex);
143 p = get_pages(obj);
144 mutex_unlock(&dev->struct_mutex);
145 return p;
146}
147
148void msm_gem_put_pages(struct drm_gem_object *obj)
149{
150 /* when we start tracking the pin count, then do something here */
151}
152
153int msm_gem_mmap_obj(struct drm_gem_object *obj,
154 struct vm_area_struct *vma)
155{
156 struct msm_gem_object *msm_obj = to_msm_bo(obj);
157
158 vma->vm_flags &= ~VM_PFNMAP;
159 vma->vm_flags |= VM_MIXEDMAP;
160
161 if (msm_obj->flags & MSM_BO_WC) {
162 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
163 } else if (msm_obj->flags & MSM_BO_UNCACHED) {
164 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
165 } else {
166 /*
167 * Shunt off cached objs to shmem file so they have their own
168 * address_space (so unmap_mapping_range does what we want,
169 * in particular in the case of mmap'd dmabufs)
170 */
171 fput(vma->vm_file);
172 get_file(obj->filp);
173 vma->vm_pgoff = 0;
174 vma->vm_file = obj->filp;
175
176 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
177 }
178
179 return 0;
180}
181
182int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
183{
184 int ret;
185
186 ret = drm_gem_mmap(filp, vma);
187 if (ret) {
188 DBG("mmap failed: %d", ret);
189 return ret;
190 }
191
192 return msm_gem_mmap_obj(vma->vm_private_data, vma);
193}
194
195int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
196{
197 struct drm_gem_object *obj = vma->vm_private_data;
198 struct drm_device *dev = obj->dev;
199 struct msm_drm_private *priv = dev->dev_private;
200 struct page **pages;
201 unsigned long pfn;
202 pgoff_t pgoff;
203 int ret;
204
205 /* This should only happen if userspace tries to pass a mmap'd
206 * but unfaulted gem bo vaddr into submit ioctl, triggering
207 * a page fault while struct_mutex is already held. This is
208 * not a valid use-case so just bail.
209 */
210 if (priv->struct_mutex_task == current)
211 return VM_FAULT_SIGBUS;
212
213 /* Make sure we don't parallel update on a fault, nor move or remove
214 * something from beneath our feet
215 */
216 ret = mutex_lock_interruptible(&dev->struct_mutex);
217 if (ret)
218 goto out;
219
220 /* make sure we have pages attached now */
221 pages = get_pages(obj);
222 if (IS_ERR(pages)) {
223 ret = PTR_ERR(pages);
224 goto out_unlock;
225 }
226
227 /* We don't use vmf->pgoff since that has the fake offset: */
228 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
229
230 pfn = page_to_pfn(pages[pgoff]);
231
232 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
233 pfn, pfn << PAGE_SHIFT);
234
235 ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
236
237out_unlock:
238 mutex_unlock(&dev->struct_mutex);
239out:
240 switch (ret) {
241 case -EAGAIN:
242 case 0:
243 case -ERESTARTSYS:
244 case -EINTR:
245 case -EBUSY:
246 /*
247 * EBUSY is ok: this just means that another thread
248 * already did the job.
249 */
250 return VM_FAULT_NOPAGE;
251 case -ENOMEM:
252 return VM_FAULT_OOM;
253 default:
254 return VM_FAULT_SIGBUS;
255 }
256}
257
258/** get mmap offset */
259static uint64_t mmap_offset(struct drm_gem_object *obj)
260{
261 struct drm_device *dev = obj->dev;
262 int ret;
263
264 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
265
266 /* Make it mmapable */
267 ret = drm_gem_create_mmap_offset(obj);
268
269 if (ret) {
270 dev_err(dev->dev, "could not allocate mmap offset\n");
271 return 0;
272 }
273
274 return drm_vma_node_offset_addr(&obj->vma_node);
275}
276
277uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
278{
279 uint64_t offset;
280 mutex_lock(&obj->dev->struct_mutex);
281 offset = mmap_offset(obj);
282 mutex_unlock(&obj->dev->struct_mutex);
283 return offset;
284}
285
286static void
287put_iova(struct drm_gem_object *obj)
288{
289 struct drm_device *dev = obj->dev;
290 struct msm_drm_private *priv = obj->dev->dev_private;
291 struct msm_gem_object *msm_obj = to_msm_bo(obj);
292 int id;
293
294 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
295
296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
297 if (!priv->aspace[id])
298 continue;
299 msm_gem_unmap_vma(priv->aspace[id],
300 &msm_obj->domain[id], msm_obj->sgt);
301 }
302}
303
304/* should be called under struct_mutex.. although it can be called
305 * from atomic context without struct_mutex to acquire an extra
306 * iova ref if you know one is already held.
307 *
308 * That means when I do eventually need to add support for unpinning
309 * the refcnt counter needs to be atomic_t.
310 */
311int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
312 uint64_t *iova)
313{
314 struct msm_gem_object *msm_obj = to_msm_bo(obj);
315 int ret = 0;
316
317 if (!msm_obj->domain[id].iova) {
318 struct msm_drm_private *priv = obj->dev->dev_private;
319 struct page **pages = get_pages(obj);
320
321 if (IS_ERR(pages))
322 return PTR_ERR(pages);
323
324 if (iommu_present(&platform_bus_type)) {
325 ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
326 msm_obj->sgt, obj->size >> PAGE_SHIFT);
327 } else {
328 msm_obj->domain[id].iova = physaddr(obj);
329 }
330 }
331
332 if (!ret)
333 *iova = msm_obj->domain[id].iova;
334
335 return ret;
336}
337
338/* get iova, taking a reference. Should have a matching put */
339int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
340{
341 struct msm_gem_object *msm_obj = to_msm_bo(obj);
342 int ret;
343
344 /* this is safe right now because we don't unmap until the
345 * bo is deleted:
346 */
347 if (msm_obj->domain[id].iova) {
348 *iova = msm_obj->domain[id].iova;
349 return 0;
350 }
351
352 mutex_lock(&obj->dev->struct_mutex);
353 ret = msm_gem_get_iova_locked(obj, id, iova);
354 mutex_unlock(&obj->dev->struct_mutex);
355 return ret;
356}
357
358/* get iova without taking a reference, used in places where you have
359 * already done a 'msm_gem_get_iova()'.
360 */
361uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
362{
363 struct msm_gem_object *msm_obj = to_msm_bo(obj);
364 WARN_ON(!msm_obj->domain[id].iova);
365 return msm_obj->domain[id].iova;
366}
367
368void msm_gem_put_iova(struct drm_gem_object *obj, int id)
369{
370 // XXX TODO ..
371 // NOTE: probably don't need a _locked() version.. we wouldn't
372 // normally unmap here, but instead just mark that it could be
373 // unmapped (if the iova refcnt drops to zero), but then later
374 // if another _get_iova_locked() fails we can start unmapping
375 // things that are no longer needed..
376}
377
378int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
379 struct drm_mode_create_dumb *args)
380{
381 args->pitch = align_pitch(args->width, args->bpp);
382 args->size = PAGE_ALIGN(args->pitch * args->height);
383 return msm_gem_new_handle(dev, file, args->size,
384 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
385}
386
387int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
388 uint32_t handle, uint64_t *offset)
389{
390 struct drm_gem_object *obj;
391 int ret = 0;
392
393 /* GEM does all our handle to object mapping */
394 obj = drm_gem_object_lookup(file, handle);
395 if (obj == NULL) {
396 ret = -ENOENT;
397 goto fail;
398 }
399
400 *offset = msm_gem_mmap_offset(obj);
401
402 drm_gem_object_unreference_unlocked(obj);
403
404fail:
405 return ret;
406}
407
408void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
409{
410 struct msm_gem_object *msm_obj = to_msm_bo(obj);
411 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
412 if (!msm_obj->vaddr) {
413 struct page **pages = get_pages(obj);
414 if (IS_ERR(pages))
415 return ERR_CAST(pages);
416 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
417 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
418 if (msm_obj->vaddr == NULL)
419 return ERR_PTR(-ENOMEM);
420 }
421 msm_obj->vmap_count++;
422 return msm_obj->vaddr;
423}
424
425void *msm_gem_get_vaddr(struct drm_gem_object *obj)
426{
427 void *ret;
428 mutex_lock(&obj->dev->struct_mutex);
429 ret = msm_gem_get_vaddr_locked(obj);
430 mutex_unlock(&obj->dev->struct_mutex);
431 return ret;
432}
433
434void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
435{
436 struct msm_gem_object *msm_obj = to_msm_bo(obj);
437 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
438 WARN_ON(msm_obj->vmap_count < 1);
439 msm_obj->vmap_count--;
440}
441
442void msm_gem_put_vaddr(struct drm_gem_object *obj)
443{
444 mutex_lock(&obj->dev->struct_mutex);
445 msm_gem_put_vaddr_locked(obj);
446 mutex_unlock(&obj->dev->struct_mutex);
447}
448
449/* Update madvise status, returns true if not purged, else
450 * false or -errno.
451 */
452int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
453{
454 struct msm_gem_object *msm_obj = to_msm_bo(obj);
455
456 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
457
458 if (msm_obj->madv != __MSM_MADV_PURGED)
459 msm_obj->madv = madv;
460
461 return (msm_obj->madv != __MSM_MADV_PURGED);
462}
463
464void msm_gem_purge(struct drm_gem_object *obj)
465{
466 struct drm_device *dev = obj->dev;
467 struct msm_gem_object *msm_obj = to_msm_bo(obj);
468
469 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
470 WARN_ON(!is_purgeable(msm_obj));
471 WARN_ON(obj->import_attach);
472
473 put_iova(obj);
474
475 msm_gem_vunmap(obj);
476
477 put_pages(obj);
478
479 msm_obj->madv = __MSM_MADV_PURGED;
480
481 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
482 drm_gem_free_mmap_offset(obj);
483
484 /* Our goal here is to return as much of the memory as
485 * is possible back to the system as we are called from OOM.
486 * To do this we must instruct the shmfs to drop all of its
487 * backing pages, *now*.
488 */
489 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
490
491 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
492 0, (loff_t)-1);
493}
494
495void msm_gem_vunmap(struct drm_gem_object *obj)
496{
497 struct msm_gem_object *msm_obj = to_msm_bo(obj);
498
499 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
500 return;
501
502 vunmap(msm_obj->vaddr);
503 msm_obj->vaddr = NULL;
504}
505
506/* must be called before _move_to_active().. */
507int msm_gem_sync_object(struct drm_gem_object *obj,
508 struct msm_fence_context *fctx, bool exclusive)
509{
510 struct msm_gem_object *msm_obj = to_msm_bo(obj);
511 struct reservation_object_list *fobj;
512 struct dma_fence *fence;
513 int i, ret;
514
515 if (!exclusive) {
516 /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
517 * which makes this a slightly strange place to call it. OTOH this
518 * is a convenient can-fail point to hook it in. (And similar to
519 * how etnaviv and nouveau handle this.)
520 */
521 ret = reservation_object_reserve_shared(msm_obj->resv);
522 if (ret)
523 return ret;
524 }
525
526 fobj = reservation_object_get_list(msm_obj->resv);
527 if (!fobj || (fobj->shared_count == 0)) {
528 fence = reservation_object_get_excl(msm_obj->resv);
529 /* don't need to wait on our own fences, since ring is fifo */
530 if (fence && (fence->context != fctx->context)) {
531 ret = dma_fence_wait(fence, true);
532 if (ret)
533 return ret;
534 }
535 }
536
537 if (!exclusive || !fobj)
538 return 0;
539
540 for (i = 0; i < fobj->shared_count; i++) {
541 fence = rcu_dereference_protected(fobj->shared[i],
542 reservation_object_held(msm_obj->resv));
543 if (fence->context != fctx->context) {
544 ret = dma_fence_wait(fence, true);
545 if (ret)
546 return ret;
547 }
548 }
549
550 return 0;
551}
552
553void msm_gem_move_to_active(struct drm_gem_object *obj,
554 struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
555{
556 struct msm_gem_object *msm_obj = to_msm_bo(obj);
557 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
558 msm_obj->gpu = gpu;
559 if (exclusive)
560 reservation_object_add_excl_fence(msm_obj->resv, fence);
561 else
562 reservation_object_add_shared_fence(msm_obj->resv, fence);
563 list_del_init(&msm_obj->mm_list);
564 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
565}
566
567void msm_gem_move_to_inactive(struct drm_gem_object *obj)
568{
569 struct drm_device *dev = obj->dev;
570 struct msm_drm_private *priv = dev->dev_private;
571 struct msm_gem_object *msm_obj = to_msm_bo(obj);
572
573 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
574
575 msm_obj->gpu = NULL;
576 list_del_init(&msm_obj->mm_list);
577 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
578}
579
580int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
581{
582 struct msm_gem_object *msm_obj = to_msm_bo(obj);
583 bool write = !!(op & MSM_PREP_WRITE);
584 unsigned long remain =
585 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
586 long ret;
587
588 ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
589 true, remain);
590 if (ret == 0)
591 return remain == 0 ? -EBUSY : -ETIMEDOUT;
592 else if (ret < 0)
593 return ret;
594
595 /* TODO cache maintenance */
596
597 return 0;
598}
599
600int msm_gem_cpu_fini(struct drm_gem_object *obj)
601{
602 /* TODO cache maintenance */
603 return 0;
604}
605
606#ifdef CONFIG_DEBUG_FS
607static void describe_fence(struct dma_fence *fence, const char *type,
608 struct seq_file *m)
609{
610 if (!dma_fence_is_signaled(fence))
611 seq_printf(m, "\t%9s: %s %s seq %u\n", type,
612 fence->ops->get_driver_name(fence),
613 fence->ops->get_timeline_name(fence),
614 fence->seqno);
615}
616
617void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
618{
619 struct msm_gem_object *msm_obj = to_msm_bo(obj);
620 struct reservation_object *robj = msm_obj->resv;
621 struct reservation_object_list *fobj;
622 struct msm_drm_private *priv = obj->dev->dev_private;
623 struct dma_fence *fence;
624 uint64_t off = drm_vma_node_start(&obj->vma_node);
625 const char *madv;
626 unsigned id;
627
628 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
629
630 switch (msm_obj->madv) {
631 case __MSM_MADV_PURGED:
632 madv = " purged";
633 break;
634 case MSM_MADV_DONTNEED:
635 madv = " purgeable";
636 break;
637 case MSM_MADV_WILLNEED:
638 default:
639 madv = "";
640 break;
641 }
642
643 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
644 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
645 obj->name, obj->refcount.refcount.counter,
646 off, msm_obj->vaddr);
647
648 for (id = 0; id < priv->num_aspaces; id++)
649 seq_printf(m, " %08llx", msm_obj->domain[id].iova);
650
651 seq_printf(m, " %zu%s\n", obj->size, madv);
652
653 rcu_read_lock();
654 fobj = rcu_dereference(robj->fence);
655 if (fobj) {
656 unsigned int i, shared_count = fobj->shared_count;
657
658 for (i = 0; i < shared_count; i++) {
659 fence = rcu_dereference(fobj->shared[i]);
660 describe_fence(fence, "Shared", m);
661 }
662 }
663
664 fence = rcu_dereference(robj->fence_excl);
665 if (fence)
666 describe_fence(fence, "Exclusive", m);
667 rcu_read_unlock();
668}
669
670void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
671{
672 struct msm_gem_object *msm_obj;
673 int count = 0;
674 size_t size = 0;
675
676 list_for_each_entry(msm_obj, list, mm_list) {
677 struct drm_gem_object *obj = &msm_obj->base;
678 seq_printf(m, " ");
679 msm_gem_describe(obj, m);
680 count++;
681 size += obj->size;
682 }
683
684 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
685}
686#endif
687
688void msm_gem_free_object(struct drm_gem_object *obj)
689{
690 struct drm_device *dev = obj->dev;
691 struct msm_gem_object *msm_obj = to_msm_bo(obj);
692
693 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
694
695 /* object should not be on active list: */
696 WARN_ON(is_active(msm_obj));
697
698 list_del(&msm_obj->mm_list);
699
700 put_iova(obj);
701
702 if (obj->import_attach) {
703 if (msm_obj->vaddr)
704 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
705
706 /* Don't drop the pages for imported dmabuf, as they are not
707 * ours, just free the array we allocated:
708 */
709 if (msm_obj->pages)
710 drm_free_large(msm_obj->pages);
711
712 drm_prime_gem_destroy(obj, msm_obj->sgt);
713 } else {
714 msm_gem_vunmap(obj);
715 put_pages(obj);
716 }
717
718 if (msm_obj->resv == &msm_obj->_resv)
719 reservation_object_fini(msm_obj->resv);
720
721 drm_gem_object_release(obj);
722
723 kfree(msm_obj);
724}
725
726/* convenience method to construct a GEM buffer object, and userspace handle */
727int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
728 uint32_t size, uint32_t flags, uint32_t *handle)
729{
730 struct drm_gem_object *obj;
731 int ret;
732
733 ret = mutex_lock_interruptible(&dev->struct_mutex);
734 if (ret)
735 return ret;
736
737 obj = msm_gem_new(dev, size, flags);
738
739 mutex_unlock(&dev->struct_mutex);
740
741 if (IS_ERR(obj))
742 return PTR_ERR(obj);
743
744 ret = drm_gem_handle_create(file, obj, handle);
745
746 /* drop reference from allocate - handle holds it now */
747 drm_gem_object_unreference_unlocked(obj);
748
749 return ret;
750}
751
752static int msm_gem_new_impl(struct drm_device *dev,
753 uint32_t size, uint32_t flags,
754 struct reservation_object *resv,
755 struct drm_gem_object **obj)
756{
757 struct msm_drm_private *priv = dev->dev_private;
758 struct msm_gem_object *msm_obj;
759 bool use_vram = false;
760
761 switch (flags & MSM_BO_CACHE_MASK) {
762 case MSM_BO_UNCACHED:
763 case MSM_BO_CACHED:
764 case MSM_BO_WC:
765 break;
766 default:
767 dev_err(dev->dev, "invalid cache flag: %x\n",
768 (flags & MSM_BO_CACHE_MASK));
769 return -EINVAL;
770 }
771
772 if (!iommu_present(&platform_bus_type))
773 use_vram = true;
774 else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
775 use_vram = true;
776
777 if (WARN_ON(use_vram && !priv->vram.size))
778 return -EINVAL;
779
780 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
781 if (!msm_obj)
782 return -ENOMEM;
783
784 if (use_vram)
785 msm_obj->vram_node = &msm_obj->domain[0].node;
786
787 msm_obj->flags = flags;
788 msm_obj->madv = MSM_MADV_WILLNEED;
789
790 if (resv) {
791 msm_obj->resv = resv;
792 } else {
793 msm_obj->resv = &msm_obj->_resv;
794 reservation_object_init(msm_obj->resv);
795 }
796
797 INIT_LIST_HEAD(&msm_obj->submit_entry);
798 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
799
800 *obj = &msm_obj->base;
801
802 return 0;
803}
804
805struct drm_gem_object *msm_gem_new(struct drm_device *dev,
806 uint32_t size, uint32_t flags)
807{
808 struct drm_gem_object *obj = NULL;
809 int ret;
810
811 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
812
813 size = PAGE_ALIGN(size);
814
815 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
816 if (ret)
817 goto fail;
818
819 if (use_pages(obj)) {
820 ret = drm_gem_object_init(dev, obj, size);
821 if (ret)
822 goto fail;
823 } else {
824 drm_gem_private_object_init(dev, obj, size);
825 }
826
827 return obj;
828
829fail:
830 drm_gem_object_unreference(obj);
831 return ERR_PTR(ret);
832}
833
834struct drm_gem_object *msm_gem_import(struct drm_device *dev,
835 struct dma_buf *dmabuf, struct sg_table *sgt)
836{
837 struct msm_gem_object *msm_obj;
838 struct drm_gem_object *obj;
839 uint32_t size;
840 int ret, npages;
841
842 /* if we don't have IOMMU, don't bother pretending we can import: */
843 if (!iommu_present(&platform_bus_type)) {
844 dev_err(dev->dev, "cannot import without IOMMU\n");
845 return ERR_PTR(-EINVAL);
846 }
847
848 size = PAGE_ALIGN(dmabuf->size);
849
850 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
851 if (ret)
852 goto fail;
853
854 drm_gem_private_object_init(dev, obj, size);
855
856 npages = size / PAGE_SIZE;
857
858 msm_obj = to_msm_bo(obj);
859 msm_obj->sgt = sgt;
860 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
861 if (!msm_obj->pages) {
862 ret = -ENOMEM;
863 goto fail;
864 }
865
866 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
867 if (ret)
868 goto fail;
869
870 return obj;
871
872fail:
873 drm_gem_object_unreference_unlocked(obj);
874 return ERR_PTR(ret);
875}