// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
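
/*
 * Lazily populate the page array and scatterlist of a BO: pages come from
 * the per-object ops->get_pages() callback and the resulting sg_table is
 * DMA-mapped via etnaviv_gem_scatter_map().  Must be called with
 * etnaviv_obj->lock held.
 */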
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
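
/*
 * Set up the vma protection according to the BO cache mode: write-combined
 * and uncached BOs get the matching pgprot, while cached BOs are redirected
 * to their shmem file so they use its address_space.
 */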
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}
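
/*
 * Look up (or create) the GPU virtual mapping of a BO in the given MMU
 * context and take a use reference on it.  Callers drop the reference
 * again with etnaviv_gem_mapping_unreference().
 */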
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				if (va && mapping->iova != va) {
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}
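
/*
 * Default ->vmap implementation: map the backing pages into the kernel
 * with a protection matching the BO cache flags.
 */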
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;
	pgprot_t prot;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	switch (obj->flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_CACHED:
		prot = PAGE_KERNEL;
		break;
	case ETNA_BO_UNCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case ETNA_BO_WC:
	default:
		prot = pgprot_writecombine(PAGE_KERNEL);
	}

	return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
}
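
/*
 * Translate the ETNA_PREP_READ/WRITE flags of a cpu_prep/cpu_fini call
 * into the DMA direction used for cache maintenance on cached BOs.
 */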
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	op &= ETNA_PREP_READ | ETNA_PREP_WRITE;

	if (op == ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op == ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
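
/*
 * Prepare a BO for CPU access: wait for outstanding GPU work (unless
 * ETNA_PREP_NOSYNC is set) and, for cached BOs, sync the pages for the
 * CPU.  etnaviv_gem_cpu_fini() hands ownership back to the device.
 */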
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
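
/*
 * Final GEM object cleanup, run when the last reference to the BO is
 * dropped: remaining (unused) VRAM mappings are unmapped and freed here.
 */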
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	mutex_destroy(&etnaviv_obj->lock);
	kfree(etnaviv_obj);
}
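
/*
 * Add the BO to the device-wide object list used by the debugfs dump in
 * etnaviv_gem_describe_objects().
 */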
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};
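
/*
 * Common BO initialisation shared by the shmem and userptr paths:
 * validates the cache flags, allocates the etnaviv_gem_object and hooks
 * up the GEM object functions.
 */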
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->size = ALIGN(size, SZ_4K);
	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
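
/*
 * ->get_pages for userptr BOs: pin the user pages with FOLL_LONGTERM.
 * Only the mm that created the BO may populate it.
 */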
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}