1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include <linux/spinlock.h>
8#include <linux/shmem_fs.h>
9#include <linux/dma-buf.h>
10#include <linux/pfn_t.h>
11
12#include <drm/drm_prime.h>
13
14#include "msm_drv.h"
15#include "msm_fence.h"
16#include "msm_gem.h"
17#include "msm_gpu.h"
18#include "msm_mmu.h"
19
20static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
21
22
23static dma_addr_t physaddr(struct drm_gem_object *obj)
24{
25 struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 struct msm_drm_private *priv = obj->dev->dev_private;
27 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
28 priv->vram.paddr;
29}
30
31static bool use_pages(struct drm_gem_object *obj)
32{
33 struct msm_gem_object *msm_obj = to_msm_bo(obj);
34 return !msm_obj->vram_node;
35}
36
37/*
38 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
39 * API. Really the GPU cache is out of scope here (handled on cmdstream)
40 * and all we need to do is invalidate newly allocated pages before
41 * mapping to the CPU as uncached/writecombine.
42 *
43 * On top of this, we have the added headache that, depending on
44 * display generation, the display's iommu may be wired up to either
45 * the toplevel drm device (mdss) or to the mdp sub-node, meaning
46 * that here we either have dma-direct or iommu ops.
47 *
48 * Let this be a cautionary tale of abstraction gone wrong.
49 */
50
51static void sync_for_device(struct msm_gem_object *msm_obj)
52{
53 struct device *dev = msm_obj->base.dev->dev;
54
55 if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
56 dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
57 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
58 } else {
59 dma_map_sg(dev, msm_obj->sgt->sgl,
60 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
61 }
62}
63
64static void sync_for_cpu(struct msm_gem_object *msm_obj)
65{
66 struct device *dev = msm_obj->base.dev->dev;
67
68 if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
69 dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
70 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
71 } else {
72 dma_unmap_sg(dev, msm_obj->sgt->sgl,
73 msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
74 }
75}
76
77/* allocate pages from VRAM carveout, used when no IOMMU: */
78static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
79{
80 struct msm_gem_object *msm_obj = to_msm_bo(obj);
81 struct msm_drm_private *priv = obj->dev->dev_private;
82 dma_addr_t paddr;
83 struct page **p;
84 int ret, i;
85
86 p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
87 if (!p)
88 return ERR_PTR(-ENOMEM);
89
90 spin_lock(&priv->vram.lock);
91 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
92 spin_unlock(&priv->vram.lock);
93 if (ret) {
94 kvfree(p);
95 return ERR_PTR(ret);
96 }
97
98 paddr = physaddr(obj);
99 for (i = 0; i < npages; i++) {
100 p[i] = phys_to_page(paddr);
101 paddr += PAGE_SIZE;
102 }
103
104 return p;
105}
106
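/* get backing pages, allocating them on first use (shmem or VRAM carveout): */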
107static struct page **get_pages(struct drm_gem_object *obj)
108{
109 struct msm_gem_object *msm_obj = to_msm_bo(obj);
110
111 if (!msm_obj->pages) {
112 struct drm_device *dev = obj->dev;
113 struct page **p;
114 int npages = obj->size >> PAGE_SHIFT;
115
116 if (use_pages(obj))
117 p = drm_gem_get_pages(obj);
118 else
119 p = get_pages_vram(obj, npages);
120
121 if (IS_ERR(p)) {
122 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
123 PTR_ERR(p));
124 return p;
125 }
126
127 msm_obj->pages = p;
128
129 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
130 if (IS_ERR(msm_obj->sgt)) {
131 void *ptr = ERR_CAST(msm_obj->sgt);
132
133 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
134 msm_obj->sgt = NULL;
135 return ptr;
136 }
137
138 /* For non-cached buffers, ensure the new pages are clean
139 * because display controller, GPU, etc. are not coherent:
140 */
141 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
142 sync_for_device(msm_obj);
143 }
144
145 return msm_obj->pages;
146}
147
148static void put_pages_vram(struct drm_gem_object *obj)
149{
150 struct msm_gem_object *msm_obj = to_msm_bo(obj);
151 struct msm_drm_private *priv = obj->dev->dev_private;
152
153 spin_lock(&priv->vram.lock);
154 drm_mm_remove_node(msm_obj->vram_node);
155 spin_unlock(&priv->vram.lock);
156
157 kvfree(msm_obj->pages);
158}
159
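/* release the sg table and backing pages (shmem or VRAM carveout): */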
160static void put_pages(struct drm_gem_object *obj)
161{
162 struct msm_gem_object *msm_obj = to_msm_bo(obj);
163
164 if (msm_obj->pages) {
165 if (msm_obj->sgt) {
166 /* For non-cached buffers, ensure the new
167 * pages are clean because display controller,
168 * GPU, etc. are not coherent:
169 */
170 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
171 sync_for_cpu(msm_obj);
172
173 sg_free_table(msm_obj->sgt);
174 kfree(msm_obj->sgt);
175 }
176
177 if (use_pages(obj))
178 drm_gem_put_pages(obj, msm_obj->pages, true, false);
179 else
180 put_pages_vram(obj);
181
182 msm_obj->pages = NULL;
183 }
184}
185
186struct page **msm_gem_get_pages(struct drm_gem_object *obj)
187{
188 struct msm_gem_object *msm_obj = to_msm_bo(obj);
189 struct page **p;
190
191 mutex_lock(&msm_obj->lock);
192
193 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
194 mutex_unlock(&msm_obj->lock);
195 return ERR_PTR(-EBUSY);
196 }
197
198 p = get_pages(obj);
199 mutex_unlock(&msm_obj->lock);
200 return p;
201}
202
203void msm_gem_put_pages(struct drm_gem_object *obj)
204{
205 /* when we start tracking the pin count, then do something here */
206}
207
208int msm_gem_mmap_obj(struct drm_gem_object *obj,
209 struct vm_area_struct *vma)
210{
211 struct msm_gem_object *msm_obj = to_msm_bo(obj);
212
213 vma->vm_flags &= ~VM_PFNMAP;
214 vma->vm_flags |= VM_MIXEDMAP;
215
216 if (msm_obj->flags & MSM_BO_WC) {
217 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
218 } else if (msm_obj->flags & MSM_BO_UNCACHED) {
219 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
220 } else {
221 /*
222 * Shunt off cached objs to shmem file so they have their own
223 * address_space (so unmap_mapping_range does what we want,
224 * in particular in the case of mmap'd dmabufs)
225 */
226 fput(vma->vm_file);
227 get_file(obj->filp);
228 vma->vm_pgoff = 0;
229 vma->vm_file = obj->filp;
230
231 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
232 }
233
234 return 0;
235}
236
237int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
238{
239 int ret;
240
241 ret = drm_gem_mmap(filp, vma);
242 if (ret) {
243 DBG("mmap failed: %d", ret);
244 return ret;
245 }
246
247 return msm_gem_mmap_obj(vma->vm_private_data, vma);
248}
249
250vm_fault_t msm_gem_fault(struct vm_fault *vmf)
251{
252 struct vm_area_struct *vma = vmf->vma;
253 struct drm_gem_object *obj = vma->vm_private_data;
254 struct msm_gem_object *msm_obj = to_msm_bo(obj);
255 struct page **pages;
256 unsigned long pfn;
257 pgoff_t pgoff;
258 int err;
259 vm_fault_t ret;
260
261 /*
262 * vm_ops.open/drm_gem_mmap_obj and close get and put
263 * a reference on obj. So, we don't need to hold one here.
264 */
265 err = mutex_lock_interruptible(&msm_obj->lock);
266 if (err) {
267 ret = VM_FAULT_NOPAGE;
268 goto out;
269 }
270
271 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
272 mutex_unlock(&msm_obj->lock);
273 return VM_FAULT_SIGBUS;
274 }
275
276 /* make sure we have pages attached now */
277 pages = get_pages(obj);
278 if (IS_ERR(pages)) {
279 ret = vmf_error(PTR_ERR(pages));
280 goto out_unlock;
281 }
282
283 /* We don't use vmf->pgoff since that has the fake offset: */
284 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
285
286 pfn = page_to_pfn(pages[pgoff]);
287
288 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
289 pfn, pfn << PAGE_SHIFT);
290
291 ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
292out_unlock:
293 mutex_unlock(&msm_obj->lock);
294out:
295 return ret;
296}
297
298/** get mmap offset */
299static uint64_t mmap_offset(struct drm_gem_object *obj)
300{
301 struct drm_device *dev = obj->dev;
302 struct msm_gem_object *msm_obj = to_msm_bo(obj);
303 int ret;
304
305 WARN_ON(!mutex_is_locked(&msm_obj->lock));
306
307 /* Make it mmapable */
308 ret = drm_gem_create_mmap_offset(obj);
309
310 if (ret) {
311 DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
312 return 0;
313 }
314
315 return drm_vma_node_offset_addr(&obj->vma_node);
316}
317
318uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
319{
320 uint64_t offset;
321 struct msm_gem_object *msm_obj = to_msm_bo(obj);
322
323 mutex_lock(&msm_obj->lock);
324 offset = mmap_offset(obj);
325 mutex_unlock(&msm_obj->lock);
326 return offset;
327}
328
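/* add a new vma for the given address space, with msm_obj->lock held: */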
329static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
330 struct msm_gem_address_space *aspace)
331{
332 struct msm_gem_object *msm_obj = to_msm_bo(obj);
333 struct msm_gem_vma *vma;
334
335 WARN_ON(!mutex_is_locked(&msm_obj->lock));
336
337 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
338 if (!vma)
339 return ERR_PTR(-ENOMEM);
340
341 vma->aspace = aspace;
342
343 list_add_tail(&vma->list, &msm_obj->vmas);
344
345 return vma;
346}
347
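/* find the vma for the given address space, if any, with msm_obj->lock held: */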
348static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
349 struct msm_gem_address_space *aspace)
350{
351 struct msm_gem_object *msm_obj = to_msm_bo(obj);
352 struct msm_gem_vma *vma;
353
354 WARN_ON(!mutex_is_locked(&msm_obj->lock));
355
356 list_for_each_entry(vma, &msm_obj->vmas, list) {
357 if (vma->aspace == aspace)
358 return vma;
359 }
360
361 return NULL;
362}
363
364static void del_vma(struct msm_gem_vma *vma)
365{
366 if (!vma)
367 return;
368
369 list_del(&vma->list);
370 kfree(vma);
371}
372
373/* Called with msm_obj->lock locked */
374static void
375put_iova(struct drm_gem_object *obj)
376{
377 struct msm_gem_object *msm_obj = to_msm_bo(obj);
378 struct msm_gem_vma *vma, *tmp;
379
380 WARN_ON(!mutex_is_locked(&msm_obj->lock));
381
382 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
383 if (vma->aspace) {
384 msm_gem_purge_vma(vma->aspace, vma);
385 msm_gem_close_vma(vma->aspace, vma);
386 }
387 del_vma(vma);
388 }
389}
390
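/* reserve an iova in the given address space, without pinning/mapping pages: */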
391static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
392 struct msm_gem_address_space *aspace, uint64_t *iova)
393{
394 struct msm_gem_object *msm_obj = to_msm_bo(obj);
395 struct msm_gem_vma *vma;
396 int ret = 0;
397
398 WARN_ON(!mutex_is_locked(&msm_obj->lock));
399
400 vma = lookup_vma(obj, aspace);
401
402 if (!vma) {
403 vma = add_vma(obj, aspace);
404 if (IS_ERR(vma))
405 return PTR_ERR(vma);
406
407 ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
408 if (ret) {
409 del_vma(vma);
410 return ret;
411 }
412 }
413
414 *iova = vma->iova;
415 return 0;
416}
417
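/* pin the iova: get backing pages and map them into the address space: */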
418static int msm_gem_pin_iova(struct drm_gem_object *obj,
419 struct msm_gem_address_space *aspace)
420{
421 struct msm_gem_object *msm_obj = to_msm_bo(obj);
422 struct msm_gem_vma *vma;
423 struct page **pages;
424 int prot = IOMMU_READ;
425
426 if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
427 prot |= IOMMU_WRITE;
428
429 WARN_ON(!mutex_is_locked(&msm_obj->lock));
430
431 if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
432 return -EBUSY;
433
434 vma = lookup_vma(obj, aspace);
435 if (WARN_ON(!vma))
436 return -EINVAL;
437
438 pages = get_pages(obj);
439 if (IS_ERR(pages))
440 return PTR_ERR(pages);
441
442 return msm_gem_map_vma(aspace, vma, prot,
443 msm_obj->sgt, obj->size >> PAGE_SHIFT);
444}
445
446/* get iova and pin it. Should have a matching put */
447int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
448 struct msm_gem_address_space *aspace, uint64_t *iova)
449{
450 struct msm_gem_object *msm_obj = to_msm_bo(obj);
451 u64 local;
452 int ret;
453
454 mutex_lock(&msm_obj->lock);
455
456 ret = msm_gem_get_iova_locked(obj, aspace, &local);
457
458 if (!ret)
459 ret = msm_gem_pin_iova(obj, aspace);
460
461 if (!ret)
462 *iova = local;
463
464 mutex_unlock(&msm_obj->lock);
465 return ret;
466}
467
468/*
469 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
470 * valid for the life of the object.
471 */
472int msm_gem_get_iova(struct drm_gem_object *obj,
473 struct msm_gem_address_space *aspace, uint64_t *iova)
474{
475 struct msm_gem_object *msm_obj = to_msm_bo(obj);
476 int ret;
477
478 mutex_lock(&msm_obj->lock);
479 ret = msm_gem_get_iova_locked(obj, aspace, iova);
480 mutex_unlock(&msm_obj->lock);
481
482 return ret;
483}
484
485/* get iova without taking a reference, used in places where you have
486 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
487 */
488uint64_t msm_gem_iova(struct drm_gem_object *obj,
489 struct msm_gem_address_space *aspace)
490{
491 struct msm_gem_object *msm_obj = to_msm_bo(obj);
492 struct msm_gem_vma *vma;
493
494 mutex_lock(&msm_obj->lock);
495 vma = lookup_vma(obj, aspace);
496 mutex_unlock(&msm_obj->lock);
497 WARN_ON(!vma);
498
499 return vma ? vma->iova : 0;
500}
501
502/*
503 * Unpin an iova by updating the reference counts. The memory isn't actually
504 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
505 * to get rid of it
506 */
507void msm_gem_unpin_iova(struct drm_gem_object *obj,
508 struct msm_gem_address_space *aspace)
509{
510 struct msm_gem_object *msm_obj = to_msm_bo(obj);
511 struct msm_gem_vma *vma;
512
513 mutex_lock(&msm_obj->lock);
514 vma = lookup_vma(obj, aspace);
515
516 if (!WARN_ON(!vma))
517 msm_gem_unmap_vma(aspace, vma);
518
519 mutex_unlock(&msm_obj->lock);
520}
521
522int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
523 struct drm_mode_create_dumb *args)
524{
525 args->pitch = align_pitch(args->width, args->bpp);
526 args->size = PAGE_ALIGN(args->pitch * args->height);
527 return msm_gem_new_handle(dev, file, args->size,
528 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
529}
530
531int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
532 uint32_t handle, uint64_t *offset)
533{
534 struct drm_gem_object *obj;
535 int ret = 0;
536
537 /* GEM does all our handle to object mapping */
538 obj = drm_gem_object_lookup(file, handle);
539 if (obj == NULL) {
540 ret = -ENOENT;
541 goto fail;
542 }
543
544 *offset = msm_gem_mmap_offset(obj);
545
546 drm_gem_object_put_unlocked(obj);
547
548fail:
549 return ret;
550}
551
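/* vmap the object into kernel space, bumping vmap_count; 'madv' is the
 * most-purgeable madvise state the caller will tolerate:
 */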
552static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
553{
554 struct msm_gem_object *msm_obj = to_msm_bo(obj);
555 int ret = 0;
556
557 mutex_lock(&msm_obj->lock);
558
559 if (WARN_ON(msm_obj->madv > madv)) {
560 DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
561 msm_obj->madv, madv);
562 mutex_unlock(&msm_obj->lock);
563 return ERR_PTR(-EBUSY);
564 }
565
566 /* increment vmap_count *before* vmap() call, so shrinker can
567 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
568 * This guarantees that we won't try to msm_gem_vunmap() this
569 * same object from within the vmap() call (while we already
570 * hold msm_obj->lock)
571 */
572 msm_obj->vmap_count++;
573
574 if (!msm_obj->vaddr) {
575 struct page **pages = get_pages(obj);
576 if (IS_ERR(pages)) {
577 ret = PTR_ERR(pages);
578 goto fail;
579 }
580 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
581 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
582 if (msm_obj->vaddr == NULL) {
583 ret = -ENOMEM;
584 goto fail;
585 }
586 }
587
588 mutex_unlock(&msm_obj->lock);
589 return msm_obj->vaddr;
590
591fail:
592 msm_obj->vmap_count--;
593 mutex_unlock(&msm_obj->lock);
594 return ERR_PTR(ret);
595}
596
597void *msm_gem_get_vaddr(struct drm_gem_object *obj)
598{
599 return get_vaddr(obj, MSM_MADV_WILLNEED);
600}
601
602/*
603 * Don't use this! It is for the very special case of dumping
604 * submits from GPU hangs or faults, where the bo may already
605 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
606 * active list.
607 */
608void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
609{
610 return get_vaddr(obj, __MSM_MADV_PURGED);
611}
612
613void msm_gem_put_vaddr(struct drm_gem_object *obj)
614{
615 struct msm_gem_object *msm_obj = to_msm_bo(obj);
616
617 mutex_lock(&msm_obj->lock);
618 WARN_ON(msm_obj->vmap_count < 1);
619 msm_obj->vmap_count--;
620 mutex_unlock(&msm_obj->lock);
621}
622
623/* Update madvise status; returns true if not purged, else
624 * false or -errno.
625 */
626int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
627{
628 struct msm_gem_object *msm_obj = to_msm_bo(obj);
629
630 mutex_lock(&msm_obj->lock);
631
632 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
633
634 if (msm_obj->madv != __MSM_MADV_PURGED)
635 msm_obj->madv = madv;
636
637 madv = msm_obj->madv;
638
639 mutex_unlock(&msm_obj->lock);
640
641 return (madv != __MSM_MADV_PURGED);
642}
643
644void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
645{
646 struct drm_device *dev = obj->dev;
647 struct msm_gem_object *msm_obj = to_msm_bo(obj);
648
649 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
650 WARN_ON(!is_purgeable(msm_obj));
651 WARN_ON(obj->import_attach);
652
653 mutex_lock_nested(&msm_obj->lock, subclass);
654
655 put_iova(obj);
656
657 msm_gem_vunmap_locked(obj);
658
659 put_pages(obj);
660
661 msm_obj->madv = __MSM_MADV_PURGED;
662
663 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
664 drm_gem_free_mmap_offset(obj);
665
666 /* Our goal here is to return as much of the memory as
667 * is possible back to the system as we are called from OOM.
668 * To do this we must instruct the shmfs to drop all of its
669 * backing pages, *now*.
670 */
671 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
672
673 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
674 0, (loff_t)-1);
675
676 mutex_unlock(&msm_obj->lock);
677}
678
679static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
680{
681 struct msm_gem_object *msm_obj = to_msm_bo(obj);
682
683 WARN_ON(!mutex_is_locked(&msm_obj->lock));
684
685 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
686 return;
687
688 vunmap(msm_obj->vaddr);
689 msm_obj->vaddr = NULL;
690}
691
692void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
693{
694 struct msm_gem_object *msm_obj = to_msm_bo(obj);
695
696 mutex_lock_nested(&msm_obj->lock, subclass);
697 msm_gem_vunmap_locked(obj);
698 mutex_unlock(&msm_obj->lock);
699}
700
701/* must be called before _move_to_active().. */
702int msm_gem_sync_object(struct drm_gem_object *obj,
703 struct msm_fence_context *fctx, bool exclusive)
704{
705 struct dma_resv_list *fobj;
706 struct dma_fence *fence;
707 int i, ret;
708
709 fobj = dma_resv_get_list(obj->resv);
710 if (!fobj || (fobj->shared_count == 0)) {
711 fence = dma_resv_get_excl(obj->resv);
712 /* don't need to wait on our own fences, since ring is fifo */
713 if (fence && (fence->context != fctx->context)) {
714 ret = dma_fence_wait(fence, true);
715 if (ret)
716 return ret;
717 }
718 }
719
720 if (!exclusive || !fobj)
721 return 0;
722
723 for (i = 0; i < fobj->shared_count; i++) {
724 fence = rcu_dereference_protected(fobj->shared[i],
725 dma_resv_held(obj->resv));
726 if (fence->context != fctx->context) {
727 ret = dma_fence_wait(fence, true);
728 if (ret)
729 return ret;
730 }
731 }
732
733 return 0;
734}
735
736void msm_gem_move_to_active(struct drm_gem_object *obj,
737 struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
738{
739 struct msm_gem_object *msm_obj = to_msm_bo(obj);
740 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
741 msm_obj->gpu = gpu;
742 if (exclusive)
743 dma_resv_add_excl_fence(obj->resv, fence);
744 else
745 dma_resv_add_shared_fence(obj->resv, fence);
746 list_del_init(&msm_obj->mm_list);
747 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
748}
749
750void msm_gem_move_to_inactive(struct drm_gem_object *obj)
751{
752 struct drm_device *dev = obj->dev;
753 struct msm_drm_private *priv = dev->dev_private;
754 struct msm_gem_object *msm_obj = to_msm_bo(obj);
755
756 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
757
758 msm_obj->gpu = NULL;
759 list_del_init(&msm_obj->mm_list);
760 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
761}
762
763int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
764{
765 bool write = !!(op & MSM_PREP_WRITE);
766 unsigned long remain =
767 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
768 long ret;
769
770 ret = dma_resv_wait_timeout_rcu(obj->resv, write,
771 true, remain);
772 if (ret == 0)
773 return remain == 0 ? -EBUSY : -ETIMEDOUT;
774 else if (ret < 0)
775 return ret;
776
777 /* TODO cache maintenance */
778
779 return 0;
780}
781
782int msm_gem_cpu_fini(struct drm_gem_object *obj)
783{
784 /* TODO cache maintenance */
785 return 0;
786}
787
788#ifdef CONFIG_DEBUG_FS
789static void describe_fence(struct dma_fence *fence, const char *type,
790 struct seq_file *m)
791{
792 if (!dma_fence_is_signaled(fence))
793 seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
794 fence->ops->get_driver_name(fence),
795 fence->ops->get_timeline_name(fence),
796 fence->seqno);
797}
798
799void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
800{
801 struct msm_gem_object *msm_obj = to_msm_bo(obj);
802 struct dma_resv *robj = obj->resv;
803 struct dma_resv_list *fobj;
804 struct dma_fence *fence;
805 struct msm_gem_vma *vma;
806 uint64_t off = drm_vma_node_start(&obj->vma_node);
807 const char *madv;
808
809 mutex_lock(&msm_obj->lock);
810
811 switch (msm_obj->madv) {
812 case __MSM_MADV_PURGED:
813 madv = " purged";
814 break;
815 case MSM_MADV_DONTNEED:
816 madv = " purgeable";
817 break;
818 case MSM_MADV_WILLNEED:
819 default:
820 madv = "";
821 break;
822 }
823
824 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
825 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
826 obj->name, kref_read(&obj->refcount),
827 off, msm_obj->vaddr);
828
829 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
830
831 if (!list_empty(&msm_obj->vmas)) {
832
833 seq_puts(m, " vmas:");
834
835 list_for_each_entry(vma, &msm_obj->vmas, list)
836 seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
837 vma->aspace != NULL ? vma->aspace->name : NULL,
838 vma->iova, vma->mapped ? "mapped" : "unmapped",
839 vma->inuse);
840
841 seq_puts(m, "\n");
842 }
843
844 rcu_read_lock();
845 fobj = rcu_dereference(robj->fence);
846 if (fobj) {
847 unsigned int i, shared_count = fobj->shared_count;
848
849 for (i = 0; i < shared_count; i++) {
850 fence = rcu_dereference(fobj->shared[i]);
851 describe_fence(fence, "Shared", m);
852 }
853 }
854
855 fence = rcu_dereference(robj->fence_excl);
856 if (fence)
857 describe_fence(fence, "Exclusive", m);
858 rcu_read_unlock();
859
860 mutex_unlock(&msm_obj->lock);
861}
862
863void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
864{
865 struct msm_gem_object *msm_obj;
866 int count = 0;
867 size_t size = 0;
868
869 seq_puts(m, " flags id ref offset kaddr size madv name\n");
870 list_for_each_entry(msm_obj, list, mm_list) {
871 struct drm_gem_object *obj = &msm_obj->base;
872 seq_puts(m, " ");
873 msm_gem_describe(obj, m);
874 count++;
875 size += obj->size;
876 }
877
878 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
879}
880#endif
881
882/* don't call directly! Use drm_gem_object_put() and friends */
883void msm_gem_free_object(struct drm_gem_object *obj)
884{
885 struct msm_gem_object *msm_obj = to_msm_bo(obj);
886 struct drm_device *dev = obj->dev;
887 struct msm_drm_private *priv = dev->dev_private;
888
889 if (llist_add(&msm_obj->freed, &priv->free_list))
890 queue_work(priv->wq, &priv->free_work);
891}
892
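/* actually free the object, called from the free worker with struct_mutex held: */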
893static void free_object(struct msm_gem_object *msm_obj)
894{
895 struct drm_gem_object *obj = &msm_obj->base;
896 struct drm_device *dev = obj->dev;
897
898 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
899
900 /* object should not be on active list: */
901 WARN_ON(is_active(msm_obj));
902
903 list_del(&msm_obj->mm_list);
904
905 mutex_lock(&msm_obj->lock);
906
907 put_iova(obj);
908
909 if (obj->import_attach) {
910 if (msm_obj->vaddr)
911 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
912
913 /* Don't drop the pages for imported dmabuf, as they are not
914 * ours, just free the array we allocated:
915 */
916 if (msm_obj->pages)
917 kvfree(msm_obj->pages);
918
919 drm_prime_gem_destroy(obj, msm_obj->sgt);
920 } else {
921 msm_gem_vunmap_locked(obj);
922 put_pages(obj);
923 }
924
925 drm_gem_object_release(obj);
926
927 mutex_unlock(&msm_obj->lock);
928 kfree(msm_obj);
929}
930
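/* deferred free worker: drains priv->free_list and frees each object: */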
931void msm_gem_free_work(struct work_struct *work)
932{
933 struct msm_drm_private *priv =
934 container_of(work, struct msm_drm_private, free_work);
935 struct drm_device *dev = priv->dev;
936 struct llist_node *freed;
937 struct msm_gem_object *msm_obj, *next;
938
939 while ((freed = llist_del_all(&priv->free_list))) {
940
941 mutex_lock(&dev->struct_mutex);
942
943 llist_for_each_entry_safe(msm_obj, next,
944 freed, freed)
945 free_object(msm_obj);
946
947 mutex_unlock(&dev->struct_mutex);
948
949 if (need_resched())
950 break;
951 }
952}
953
954/* convenience method to construct a GEM buffer object, and userspace handle */
955int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
956 uint32_t size, uint32_t flags, uint32_t *handle,
957 char *name)
958{
959 struct drm_gem_object *obj;
960 int ret;
961
962 obj = msm_gem_new(dev, size, flags);
963
964 if (IS_ERR(obj))
965 return PTR_ERR(obj);
966
967 if (name)
968 msm_gem_object_set_name(obj, "%s", name);
969
970 ret = drm_gem_handle_create(file, obj, handle);
971
972 /* drop reference from allocate - handle holds it now */
973 drm_gem_object_put_unlocked(obj);
974
975 return ret;
976}
977
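/* common initialization for newly allocated and imported objects: */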
978static int msm_gem_new_impl(struct drm_device *dev,
979 uint32_t size, uint32_t flags,
980 struct drm_gem_object **obj,
981 bool struct_mutex_locked)
982{
983 struct msm_drm_private *priv = dev->dev_private;
984 struct msm_gem_object *msm_obj;
985
986 switch (flags & MSM_BO_CACHE_MASK) {
987 case MSM_BO_UNCACHED:
988 case MSM_BO_CACHED:
989 case MSM_BO_WC:
990 break;
991 default:
992 DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
993 (flags & MSM_BO_CACHE_MASK));
994 return -EINVAL;
995 }
996
997 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
998 if (!msm_obj)
999 return -ENOMEM;
1000
1001 mutex_init(&msm_obj->lock);
1002
1003 msm_obj->flags = flags;
1004 msm_obj->madv = MSM_MADV_WILLNEED;
1005
1006 INIT_LIST_HEAD(&msm_obj->submit_entry);
1007 INIT_LIST_HEAD(&msm_obj->vmas);
1008
1009 if (struct_mutex_locked) {
1010 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1011 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1012 } else {
1013 mutex_lock(&dev->struct_mutex);
1014 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1015 mutex_unlock(&dev->struct_mutex);
1016 }
1017
1018 *obj = &msm_obj->base;
1019
1020 return 0;
1021}
1022
1023static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
1024 uint32_t size, uint32_t flags, bool struct_mutex_locked)
1025{
1026 struct msm_drm_private *priv = dev->dev_private;
1027 struct drm_gem_object *obj = NULL;
1028 bool use_vram = false;
1029 int ret;
1030
1031 size = PAGE_ALIGN(size);
1032
1033 if (!msm_use_mmu(dev))
1034 use_vram = true;
1035 else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1036 use_vram = true;
1037
1038 if (WARN_ON(use_vram && !priv->vram.size))
1039 return ERR_PTR(-EINVAL);
1040
1041 /* Disallow zero sized objects as they make the underlying
1042 * infrastructure grumpy
1043 */
1044 if (size == 0)
1045 return ERR_PTR(-EINVAL);
1046
1047 ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
1048 if (ret)
1049 goto fail;
1050
1051 if (use_vram) {
1052 struct msm_gem_vma *vma;
1053 struct page **pages;
1054 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1055
1056 mutex_lock(&msm_obj->lock);
1057
1058 vma = add_vma(obj, NULL);
1059 mutex_unlock(&msm_obj->lock);
1060 if (IS_ERR(vma)) {
1061 ret = PTR_ERR(vma);
1062 goto fail;
1063 }
1064
1065 to_msm_bo(obj)->vram_node = &vma->node;
1066
1067 drm_gem_private_object_init(dev, obj, size);
1068
1069 pages = get_pages(obj);
1070 if (IS_ERR(pages)) {
1071 ret = PTR_ERR(pages);
1072 goto fail;
1073 }
1074
1075 vma->iova = physaddr(obj);
1076 } else {
1077 ret = drm_gem_object_init(dev, obj, size);
1078 if (ret)
1079 goto fail;
1080 /*
1081 * Our buffers are kept pinned, so allocating them from the
1082 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1083 * See comments above new_inode() for why this is required _and_
1084 * expected if you're going to pin these pages.
1085 */
1086 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1087 }
1088
1089 return obj;
1090
1091fail:
1092 drm_gem_object_put_unlocked(obj);
1093 return ERR_PTR(ret);
1094}
1095
1096struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
1097 uint32_t size, uint32_t flags)
1098{
1099 return _msm_gem_new(dev, size, flags, true);
1100}
1101
1102struct drm_gem_object *msm_gem_new(struct drm_device *dev,
1103 uint32_t size, uint32_t flags)
1104{
1105 return _msm_gem_new(dev, size, flags, false);
1106}
1107
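/* import a dma-buf: wrap the caller-provided sg_table in a GEM object: */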
1108struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1109 struct dma_buf *dmabuf, struct sg_table *sgt)
1110{
1111 struct msm_gem_object *msm_obj;
1112 struct drm_gem_object *obj;
1113 uint32_t size;
1114 int ret, npages;
1115
1116 /* if we don't have IOMMU, don't bother pretending we can import: */
1117 if (!msm_use_mmu(dev)) {
1118 DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1119 return ERR_PTR(-EINVAL);
1120 }
1121
1122 size = PAGE_ALIGN(dmabuf->size);
1123
1124 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
1125 if (ret)
1126 goto fail;
1127
1128 drm_gem_private_object_init(dev, obj, size);
1129
1130 npages = size / PAGE_SIZE;
1131
1132 msm_obj = to_msm_bo(obj);
1133 mutex_lock(&msm_obj->lock);
1134 msm_obj->sgt = sgt;
1135 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1136 if (!msm_obj->pages) {
1137 mutex_unlock(&msm_obj->lock);
1138 ret = -ENOMEM;
1139 goto fail;
1140 }
1141
1142 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
1143 if (ret) {
1144 mutex_unlock(&msm_obj->lock);
1145 goto fail;
1146 }
1147
1148 mutex_unlock(&msm_obj->lock);
1149 return obj;
1150
1151fail:
1152 drm_gem_object_put_unlocked(obj);
1153 return ERR_PTR(ret);
1154}
1155
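/* allocate a kernel-internal buffer: create the object, optionally pin
 * an iova in the given address space, and vmap it:
 */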
1156static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1157 uint32_t flags, struct msm_gem_address_space *aspace,
1158 struct drm_gem_object **bo, uint64_t *iova, bool locked)
1159{
1160 void *vaddr;
1161 struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
1162 int ret;
1163
1164 if (IS_ERR(obj))
1165 return ERR_CAST(obj);
1166
1167 if (iova) {
1168 ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1169 if (ret)
1170 goto err;
1171 }
1172
1173 vaddr = msm_gem_get_vaddr(obj);
1174 if (IS_ERR(vaddr)) {
1175 msm_gem_unpin_iova(obj, aspace);
1176 ret = PTR_ERR(vaddr);
1177 goto err;
1178 }
1179
1180 if (bo)
1181 *bo = obj;
1182
1183 return vaddr;
1184err:
1185 if (locked)
1186 drm_gem_object_put(obj);
1187 else
1188 drm_gem_object_put_unlocked(obj);
1189
1190 return ERR_PTR(ret);
1191
1192}
1193
1194void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1195 uint32_t flags, struct msm_gem_address_space *aspace,
1196 struct drm_gem_object **bo, uint64_t *iova)
1197{
1198 return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
1199}
1200
1201void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
1202 uint32_t flags, struct msm_gem_address_space *aspace,
1203 struct drm_gem_object **bo, uint64_t *iova)
1204{
1205 return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
1206}
1207
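/* counterpart to msm_gem_kernel_new(): unmap, unpin and drop the object reference: */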
1208void msm_gem_kernel_put(struct drm_gem_object *bo,
1209 struct msm_gem_address_space *aspace, bool locked)
1210{
1211 if (IS_ERR_OR_NULL(bo))
1212 return;
1213
1214 msm_gem_put_vaddr(bo);
1215 msm_gem_unpin_iova(bo, aspace);
1216
1217 if (locked)
1218 drm_gem_object_put(bo);
1219 else
1220 drm_gem_object_put_unlocked(bo);
1221}
1222
1223void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1224{
1225 struct msm_gem_object *msm_obj = to_msm_bo(bo);
1226 va_list ap;
1227
1228 if (!fmt)
1229 return;
1230
1231 va_start(ap, fmt);
1232 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1233 va_end(ap);
1234}