// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

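/*
 * Count the number of contiguous DMA chunks in a mapped scatterlist.
 * Adjacent entries whose DMA ranges abut are counted as a single chunk.
 */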
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

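/*
 * Pin a buffer object for use by a given device, producing a new
 * host1x_bo_mapping. Three cases are handled below: imported DMA-BUFs are
 * attached and mapped through the DMA-BUF API, page-backed buffers get an
 * SG table built from their pages, and DMA API allocations are described
 * via dma_get_sgtable().
 */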
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

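/* Undo tegra_bo_pin(): release the DMA-BUF attachment or the DMA mapping. */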
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

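/*
 * Return a kernel virtual address for the buffer: the existing vaddr for
 * DMA API allocations, a dma_buf_vmap_unlocked() mapping for imported
 * buffers, or a write-combined vmap() of the pages otherwise.
 */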
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

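/*
 * Reserve a region of I/O virtual address space from the drm_mm allocator
 * and map the buffer's SG table into it; tegra->mm_lock serializes
 * concurrent allocations from the IOVA space.
 */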
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

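/*
 * Back the buffer with shmem pages and map them for device access; the
 * resulting SG table is subsequently mapped through the IOMMU by
 * tegra_bo_iommu_map().
 */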
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

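/*
 * Allocate backing storage: individual pages mapped through the IOMMU if
 * an IOMMU domain is available, or a physically contiguous write-combined
 * DMA allocation otherwise.
 */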
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

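/*
 * Wrap a foreign DMA-BUF in a tegra_bo. The buffer is attached and mapped
 * immediately and, if an IOMMU domain is available, also mapped into the
 * Tegra IOVA space.
 */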
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

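/*
 * Final unreference: drop any cached host1x mappings, undo the IOMMU
 * mapping and release the backing storage (or the imported DMA-BUF).
 */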
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

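/*
 * Common mmap implementation shared by the file-based and PRIME paths:
 * contiguous DMA allocations are mapped with dma_mmap_wc(), while
 * page-backed buffers are faulted in page by page via tegra_bo_fault().
 */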
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

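/*
 * Map the buffer into an importer's address space for DMA. A fresh SG
 * table is built for each call so that importers can map the same buffer
 * independently.
 */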
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

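/*
 * CPU access bracketing for page-backed buffers: sync the SG table so the
 * CPU sees device writes on entry and the device sees CPU writes on exit.
 * Buffers allocated with dma_alloc_wc() are coherent and need no sync.
 */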
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	dma_resv_assert_held(buf->resv);

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}
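
/*
 * Usage sketch (illustrative only, not part of the driver): a caller with
 * a DRM file and device, for instance an IOCTL handler, could allocate a
 * bottom-up buffer and expose it to userspace using only the helpers
 * defined above. The size is arbitrary here.
 *
 *	u32 handle;
 *	struct tegra_bo *bo;
 *
 *	bo = tegra_bo_create_with_handle(file, drm, SZ_1M,
 *					 DRM_TEGRA_GEM_CREATE_BOTTOM_UP,
 *					 &handle);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * Note that tegra_bo_create_with_handle() drops its own reference after
 * creating the handle, so the handle alone keeps the object alive.
 */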