// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

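/*
 * host1x_bo_ops implementation: the host1x bus code uses the callbacks
 * below to manage the lifetime and device mappings of Tegra GEM buffer
 * objects.
 */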
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

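/*
 * Duplicate the entries of an existing scatterlist into a newly allocated
 * SG table. Only the pages and lengths are copied (offsets are dropped),
 * so the copy carries no DMA mapping state of its own.
 */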
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

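/*
 * Return value convention: if the caller passes a non-NULL @phys pointer,
 * the buffer's IOVA (or physical address) is returned through it and the
 * function returns NULL. Otherwise an SG table is allocated and returned
 * so that host1x can map the buffer via the DMA API.
 */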
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

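/*
 * Map the buffer into the kernel's address space. For buffers allocated
 * via the DMA API the existing kernel mapping is returned, imported
 * buffers are mapped through the exporter's vmap operation, and
 * IOMMU-backed buffers are vmap()ed from their page array with
 * write-combined attributes.
 */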
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

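/*
 * Reserve a node in the Tegra DRM address space and map the buffer's SG
 * table into it through the IOMMU. On success, bo->iova holds the start
 * of the mapping and bo->size its mapped length.
 */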
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

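/*
 * Allocate and initialize the GEM object common to all Tegra buffer
 * objects: round the size up to a multiple of the page size, initialize
 * the embedded host1x_bo and create the fake mmap offset that userspace
 * uses to map the buffer.
 */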
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

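/*
 * Allocate backing pages from shmem, build an SG table for them and map
 * it for the device. Note that dma_map_sg() returns the number of mapped
 * entries and 0 on failure, hence the explicit zero check below.
 */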
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

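/*
 * Two allocation strategies: if an IOMMU domain is available, allocate
 * discontiguous pages and map them into the Tegra DRM address space;
 * otherwise fall back to a physically contiguous, write-combined DMA
 * allocation.
 */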
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

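/*
 * Create a buffer object and immediately associate a handle with it for
 * the given DRM file. On success the handle owns the only reference, so
 * the returned pointer is valid only as long as the handle exists.
 */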
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

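/*
 * Wrap an imported DMA-BUF in a GEM object: attach to the buffer, map
 * the attachment and, if an IOMMU domain is in use, also map the buffer
 * into the Tegra DRM address space.
 */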
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

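/*
 * Page fault handler for mmap()ed buffer objects. Only IOMMU-backed
 * buffers fault their pages in lazily; contiguous DMA buffers are mapped
 * up front in __tegra_gem_mmap() and should never reach this point.
 */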
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

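/*
 * PRIME export path: build an SG table for the attached device. For
 * IOMMU-backed buffers it is constructed from the page array, while for
 * contiguous buffers the DMA API helper is used. The table is mapped for
 * the importing device before it is returned.
 */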
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

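/*
 * Cache maintenance around CPU accesses is only needed for page-backed
 * buffer objects; contiguous buffers are allocated write-combined
 * (uncached), so no explicit synchronization is required for them.
 */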
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

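/*
 * PRIME import path: if the DMA-BUF was exported by this driver and
 * device, short-circuit the import and reuse the original GEM object
 * instead of creating a new wrapper around it.
 */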
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}