drivers/gpu/drm/tegra/gem.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * NVIDIA Tegra DRM GEM helper functions
  4 *
  5 * Copyright (C) 2012 Sascha Hauer, Pengutronix
  6 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
  7 *
  8 * Based on the GEM/CMA helpers
  9 *
 10 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 11 */
 12
 13#include <linux/dma-buf.h>
 14#include <linux/iommu.h>
 15#include <linux/module.h>
 16#include <linux/vmalloc.h>
 17
 18#include <drm/drm_drv.h>
 19#include <drm/drm_prime.h>
 20#include <drm/tegra_drm.h>
 21
 22#include "drm.h"
 23#include "gem.h"
 24
 25MODULE_IMPORT_NS("DMA_BUF");
 26
 27static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
 28{
 29	dma_addr_t next = ~(dma_addr_t)0;
 30	unsigned int count = 0, i;
 31	struct scatterlist *s;
 32
 33	for_each_sg(sgl, s, nents, i) {
 34		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
 35		if (!sg_dma_len(s))
 36			continue;
 37
 38		if (sg_dma_address(s) != next) {
 39			next = sg_dma_address(s) + sg_dma_len(s);
 40			count++;
 41		}
 42	}
 43
 44	return count;
 45}
 46
 47static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
 48{
 49	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
 50}
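The two helpers above report how many discontiguous DMA regions a mapped scatterlist resolves to: an entry whose address continues the previous chunk is folded into it, anything else starts a new one. Below is a minimal userspace re-implementation of the same counting logic over plain (address, length) pairs; the struct and function names are illustrative, not kernel API.

#include <stdio.h>

struct seg { unsigned long long addr; unsigned long long len; };

/* Mirrors sg_dma_count_chunks(): zero-length entries are skipped and
 * "next" only advances when a new chunk begins. */
static unsigned int count_chunks(const struct seg *s, unsigned int n)
{
	unsigned long long next = ~0ULL;
	unsigned int count = 0, i;

	for (i = 0; i < n; i++) {
		if (!s[i].len)
			continue;

		if (s[i].addr != next) {
			next = s[i].addr + s[i].len;
			count++;
		}
	}

	return count;
}

int main(void)
{
	/* 0x1000..0x1fff chains into 0x2000..0x2fff (one chunk);
	 * 0x8000 is discontiguous and starts a second chunk. */
	struct seg segs[] = {
		{ 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x8000, 0x1000 },
	};

	printf("%u\n", count_chunks(segs, 3)); /* prints 2 */
	return 0;
}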
 51
 52static void tegra_bo_put(struct host1x_bo *bo)
 53{
 54	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 55
 56	drm_gem_object_put(&obj->gem);
 57}
 58
 59static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
 60					      enum dma_data_direction direction)
 61{
 62	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 63	struct drm_gem_object *gem = &obj->gem;
 64	struct host1x_bo_mapping *map;
 65	int err;
 66
 67	map = kzalloc(sizeof(*map), GFP_KERNEL);
 68	if (!map)
 69		return ERR_PTR(-ENOMEM);
 70
 71	kref_init(&map->ref);
 72	map->bo = host1x_bo_get(bo);
 73	map->direction = direction;
 74	map->dev = dev;
 75
 76	/*
 77	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
 78	 */
 79	if (obj->dma_buf) {
 80		struct dma_buf *buf = obj->dma_buf;
 81
 82		map->attach = dma_buf_attach(buf, dev);
 83		if (IS_ERR(map->attach)) {
 84			err = PTR_ERR(map->attach);
 85			goto free;
 86		}
 87
 88		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
 89		if (IS_ERR(map->sgt)) {
 90			dma_buf_detach(buf, map->attach);
 91			err = PTR_ERR(map->sgt);
 92			map->sgt = NULL;
 93			goto free;
 94		}
 95
 96		err = sgt_dma_count_chunks(map->sgt);
 97		map->size = gem->size;
 98
 99		goto out;
100	}
101
102	/*
103	 * If we don't have a mapping for this buffer yet, return an SG table
104	 * so that host1x can do the mapping for us via the DMA API.
105	 */
106	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
107	if (!map->sgt) {
108		err = -ENOMEM;
109		goto free;
110	}
111
112	if (obj->pages) {
113		/*
114		 * If the buffer object was allocated from the explicit IOMMU
115		 * API code paths, construct an SG table from the pages.
116		 */
117		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
118						GFP_KERNEL);
119		if (err < 0)
120			goto free;
121	} else {
122		/*
123		 * If the buffer object had no pages allocated and if it was
124		 * not imported, it had to be allocated with the DMA API, so
125		 * the DMA API helper can be used.
126		 */
127		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
128		if (err < 0)
129			goto free;
130	}
131
132	err = dma_map_sgtable(dev, map->sgt, direction, 0);
133	if (err)
134		goto free_sgt;
135
136out:
137	/*
138	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
139	 * existing IOVA address of our mapping.
140	 */
141	if (!obj->mm) {
142		map->phys = sg_dma_address(map->sgt->sgl);
143		map->chunks = err;
144	} else {
145		map->phys = obj->iova;
146		map->chunks = 1;
147	}
148
149	map->size = gem->size;
150
151	return map;
152
153free_sgt:
154	sg_free_table(map->sgt);
155free:
156	kfree(map->sgt);
157	kfree(map);
158	return ERR_PTR(err);
159}
160
161static void tegra_bo_unpin(struct host1x_bo_mapping *map)
162{
163	if (map->attach) {
164		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
165						  map->direction);
166		dma_buf_detach(map->attach->dmabuf, map->attach);
167	} else {
168		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
169		sg_free_table(map->sgt);
170		kfree(map->sgt);
171	}
172
173	host1x_bo_put(map->bo);
174	kfree(map);
175}
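tegra_bo_pin() and tegra_bo_unpin() are not called directly by client drivers; they implement the host1x mapping interface, which reference-counts and optionally caches mappings per device. A rough sketch of the consumer side follows, assuming the host1x_bo_pin()/host1x_bo_unpin() kernel API of this kernel generation; "client", "bo" and program_engine() are placeholders, and error handling is abbreviated.

struct host1x_bo_mapping *map;

/* Attach/map the buffer for DMA by this client's device (no cache). */
map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, NULL);
if (IS_ERR(map))
	return PTR_ERR(map);

program_engine(client, map->phys);	/* hypothetical hardware setup */

/* Drops the mapping reference; tegra_bo_unpin() runs when it hits zero. */
host1x_bo_unpin(map);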
176
177static void *tegra_bo_mmap(struct host1x_bo *bo)
178{
179	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
180	struct iosys_map map = { 0 };
181	void *vaddr;
182	int ret;
183
184	if (obj->vaddr)
185		return obj->vaddr;
186
187	if (obj->dma_buf) {
188		ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
189		if (ret < 0)
190			return ERR_PTR(ret);
191
192		return map.vaddr;
193	}
194
195	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
196		     pgprot_writecombine(PAGE_KERNEL));
197	if (!vaddr)
198		return ERR_PTR(-ENOMEM);
199
200	return vaddr;
201}
202
203static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
204{
205	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
206	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);
207
208	if (obj->vaddr)
209		return;
210
211	if (obj->dma_buf)
212		return dma_buf_vunmap_unlocked(obj->dma_buf, &map);
213
214	vunmap(addr);
215}
216
217static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
218{
219	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
220
221	drm_gem_object_get(&obj->gem);
222
223	return bo;
224}
225
226static const struct host1x_bo_ops tegra_bo_ops = {
227	.get = tegra_bo_get,
228	.put = tegra_bo_put,
229	.pin = tegra_bo_pin,
230	.unpin = tegra_bo_unpin,
231	.mmap = tegra_bo_mmap,
232	.munmap = tegra_bo_munmap,
233};
234
235static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
236{
237	int prot = IOMMU_READ | IOMMU_WRITE;
238	int err;
239
240	if (bo->mm)
241		return -EBUSY;
242
243	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
244	if (!bo->mm)
245		return -ENOMEM;
246
247	mutex_lock(&tegra->mm_lock);
248
249	err = drm_mm_insert_node_generic(&tegra->mm,
250					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
251	if (err < 0) {
252		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
253			err);
254		goto unlock;
255	}
256
257	bo->iova = bo->mm->start;
258
259	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
260	if (!bo->size) {
261		dev_err(tegra->drm->dev, "failed to map buffer\n");
262		err = -ENOMEM;
263		goto remove;
264	}
265
266	mutex_unlock(&tegra->mm_lock);
267
268	return 0;
269
270remove:
271	drm_mm_remove_node(bo->mm);
272unlock:
273	mutex_unlock(&tegra->mm_lock);
274	kfree(bo->mm);
275	return err;
276}
277
278static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
279{
280	if (!bo->mm)
281		return 0;
282
283	mutex_lock(&tegra->mm_lock);
284	iommu_unmap(tegra->domain, bo->iova, bo->size);
285	drm_mm_remove_node(bo->mm);
286	mutex_unlock(&tegra->mm_lock);
287
288	kfree(bo->mm);
289
290	return 0;
291}
292
293static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
294	.free = tegra_bo_free_object,
295	.export = tegra_gem_prime_export,
296	.vm_ops = &tegra_bo_vm_ops,
297};
298
299static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
300					      size_t size)
301{
302	struct tegra_bo *bo;
303	int err;
304
305	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
306	if (!bo)
307		return ERR_PTR(-ENOMEM);
308
309	bo->gem.funcs = &tegra_gem_object_funcs;
310
311	host1x_bo_init(&bo->base, &tegra_bo_ops);
312	size = round_up(size, PAGE_SIZE);
313
314	err = drm_gem_object_init(drm, &bo->gem, size);
315	if (err < 0)
316		goto free;
317
318	err = drm_gem_create_mmap_offset(&bo->gem);
319	if (err < 0)
320		goto release;
321
322	return bo;
323
324release:
325	drm_gem_object_release(&bo->gem);
326free:
327	kfree(bo);
328	return ERR_PTR(err);
329}
330
331static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
332{
333	if (bo->pages) {
334		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
335		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
336		sg_free_table(bo->sgt);
337		kfree(bo->sgt);
338	} else if (bo->vaddr) {
339		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
340	}
341}
342
343static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
344{
345	int err;
346
347	bo->pages = drm_gem_get_pages(&bo->gem);
348	if (IS_ERR(bo->pages))
349		return PTR_ERR(bo->pages);
350
351	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
352
353	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
354	if (IS_ERR(bo->sgt)) {
355		err = PTR_ERR(bo->sgt);
356		goto put_pages;
357	}
358
359	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
360	if (err)
361		goto free_sgt;
362
363	return 0;
364
365free_sgt:
366	sg_free_table(bo->sgt);
367	kfree(bo->sgt);
368put_pages:
369	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
370	return err;
371}
372
373static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
374{
375	struct tegra_drm *tegra = drm->dev_private;
376	int err;
377
378	if (tegra->domain) {
379		err = tegra_bo_get_pages(drm, bo);
380		if (err < 0)
381			return err;
382
383		err = tegra_bo_iommu_map(tegra, bo);
384		if (err < 0) {
385			tegra_bo_free(drm, bo);
386			return err;
387		}
388	} else {
389		size_t size = bo->gem.size;
390
391		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
392					 GFP_KERNEL | __GFP_NOWARN);
393		if (!bo->vaddr) {
394			dev_err(drm->dev,
395				"failed to allocate buffer of size %zu\n",
396				size);
397			return -ENOMEM;
398		}
399	}
400
401	return 0;
402}
403
404struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
405				 unsigned long flags)
406{
407	struct tegra_bo *bo;
408	int err;
409
410	bo = tegra_bo_alloc_object(drm, size);
411	if (IS_ERR(bo))
412		return bo;
413
414	err = tegra_bo_alloc(drm, bo);
415	if (err < 0)
416		goto release;
417
418	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
419		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
420
421	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
422		bo->flags |= TEGRA_BO_BOTTOM_UP;
423
424	return bo;
425
426release:
427	drm_gem_object_release(&bo->gem);
428	kfree(bo);
429	return ERR_PTR(err);
430}
431
432struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
433					     struct drm_device *drm,
434					     size_t size,
435					     unsigned long flags,
436					     u32 *handle)
437{
438	struct tegra_bo *bo;
439	int err;
440
441	bo = tegra_bo_create(drm, size, flags);
442	if (IS_ERR(bo))
443		return bo;
444
445	err = drm_gem_handle_create(file, &bo->gem, handle);
446	if (err) {
447		tegra_bo_free_object(&bo->gem);
448		return ERR_PTR(err);
449	}
450
451	drm_gem_object_put(&bo->gem);
452
453	return bo;
454}
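tegra_bo_create_with_handle() backs the driver's GEM-create ioctl; the handle is what userspace uses in all later ioctls, and the local reference is dropped immediately because the handle now owns the object. A hedged userspace sketch of that path, assuming the DRM_IOCTL_TEGRA_GEM_CREATE uapi from <drm/tegra_drm.h> and a Tegra DRM node at /dev/dri/card0:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/tegra_drm.h>

int main(void)
{
	struct drm_tegra_gem_create args = {
		.size = 1 << 20,	/* rounded up to PAGE_SIZE in tegra_bo_alloc_object() */
		.flags = 0,		/* or DRM_TEGRA_GEM_CREATE_TILED / _BOTTOM_UP */
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_TEGRA_GEM_CREATE, &args) < 0)
		return 1;

	printf("GEM handle %u\n", args.handle);
	return 0;
}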
455
456static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
457					struct dma_buf *buf)
458{
459	struct tegra_drm *tegra = drm->dev_private;
460	struct dma_buf_attachment *attach;
461	struct tegra_bo *bo;
462	int err;
463
464	bo = tegra_bo_alloc_object(drm, buf->size);
465	if (IS_ERR(bo))
466		return bo;
467
468	/*
469	 * If we need to use IOMMU API to map the dma-buf into the internally managed
470	 * domain, map it first to the DRM device to get an sgt.
471	 */
472	if (tegra->domain) {
473		attach = dma_buf_attach(buf, drm->dev);
474		if (IS_ERR(attach)) {
475			err = PTR_ERR(attach);
476			goto free;
477		}
478
479		bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
480		if (IS_ERR(bo->sgt)) {
481			err = PTR_ERR(bo->sgt);
482			goto detach;
483		}
484
485		err = tegra_bo_iommu_map(tegra, bo);
486		if (err < 0)
487			goto detach;
488
489		bo->gem.import_attach = attach;
490	}
491
492	get_dma_buf(buf);
493	bo->dma_buf = buf;
494
495	return bo;
496
497detach:
498	if (!IS_ERR_OR_NULL(bo->sgt))
499		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
500
501	dma_buf_detach(buf, attach);
502	dma_buf_put(buf);
503free:
504	drm_gem_object_release(&bo->gem);
505	kfree(bo);
506	return ERR_PTR(err);
507}
508
509void tegra_bo_free_object(struct drm_gem_object *gem)
510{
511	struct tegra_drm *tegra = gem->dev->dev_private;
512	struct host1x_bo_mapping *mapping, *tmp;
513	struct tegra_bo *bo = to_tegra_bo(gem);
514
515	/* remove all mappings of this buffer object from any caches */
516	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
517		if (mapping->cache)
518			host1x_bo_unpin(mapping);
519		else
520			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
521				dev_name(mapping->dev));
522	}
523
524	if (tegra->domain) {
525		tegra_bo_iommu_unmap(tegra, bo);
526
527		if (gem->import_attach) {
528			dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
529							  DMA_TO_DEVICE);
530			dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
531		}
532	}
533
534	tegra_bo_free(gem->dev, bo);
535
536	if (bo->dma_buf)
537		dma_buf_put(bo->dma_buf);
538
539	drm_gem_object_release(gem);
540	kfree(bo);
541}
542
543int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
544			 struct drm_mode_create_dumb *args)
545{
546	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
547	struct tegra_drm *tegra = drm->dev_private;
548	struct tegra_bo *bo;
549
550	args->pitch = round_up(min_pitch, tegra->pitch_align);
551	args->size = args->pitch * args->height;
552
553	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
554					 &args->handle);
555	if (IS_ERR(bo))
556		return PTR_ERR(bo);
557
558	return 0;
559}
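tegra_bo_dumb_create() serves the generic dumb-buffer ioctl: userspace supplies width, height and bpp, and the kernel fills in the pitch (rounded up to tegra->pitch_align), the total size and the handle. A short sketch of the driver-independent call, reusing the fd from the sketch above:

struct drm_mode_create_dumb create = {
	.width	= 1920,
	.height	= 1080,
	.bpp	= 32,
};

if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0)
	return -1;

/* create.pitch, create.size and create.handle are now filled in. */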
560
561static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
562{
563	struct vm_area_struct *vma = vmf->vma;
564	struct drm_gem_object *gem = vma->vm_private_data;
565	struct tegra_bo *bo = to_tegra_bo(gem);
566	struct page *page;
567	pgoff_t offset;
568
569	if (!bo->pages)
570		return VM_FAULT_SIGBUS;
571
572	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
573	page = bo->pages[offset];
574
575	return vmf_insert_page(vma, vmf->address, page);
576}
577
578const struct vm_operations_struct tegra_bo_vm_ops = {
579	.fault = tegra_bo_fault,
580	.open = drm_gem_vm_open,
581	.close = drm_gem_vm_close,
582};
583
584int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
585{
586	struct tegra_bo *bo = to_tegra_bo(gem);
587
588	if (!bo->pages) {
589		unsigned long vm_pgoff = vma->vm_pgoff;
590		int err;
591
592		/*
593		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
594		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
595		 * to 0 as we want to map the whole buffer.
596		 */
597		vm_flags_clear(vma, VM_PFNMAP);
598		vma->vm_pgoff = 0;
599
600		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
601				  gem->size);
602		if (err < 0) {
603			drm_gem_vm_close(vma);
604			return err;
605		}
606
607		vma->vm_pgoff = vm_pgoff;
608	} else {
609		pgprot_t prot = vm_get_page_prot(vma->vm_flags);
610
611		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
612
613		vma->vm_page_prot = pgprot_writecombine(prot);
614	}
615
616	return 0;
617}
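__tegra_gem_mmap() picks one of two strategies: buffers that came from dma_alloc_wc() are mapped up front with dma_mmap_wc(), while IOMMU-backed buffers keep VM_MIXEDMAP and populate pages lazily through tegra_bo_fault(). From userspace both look the same; continuing the dumb-buffer sketch above (assuming <sys/mman.h>):

/* The fake offset from DRM_IOCTL_MODE_MAP_DUMB feeds tegra_drm_mmap()
 * via the device node's mmap(). */
struct drm_mode_map_dumb map = { .handle = create.handle };
void *ptr;

if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0)
	return -1;

ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
	   fd, map.offset);
if (ptr == MAP_FAILED)
	return -1;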
618
619int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
620{
621	struct drm_gem_object *gem;
622	int err;
623
624	err = drm_gem_mmap(file, vma);
625	if (err < 0)
626		return err;
627
628	gem = vma->vm_private_data;
629
630	return __tegra_gem_mmap(gem, vma);
631}
632
633static struct sg_table *
634tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
635			    enum dma_data_direction dir)
636{
637	struct drm_gem_object *gem = attach->dmabuf->priv;
638	struct tegra_bo *bo = to_tegra_bo(gem);
639	struct sg_table *sgt;
640
641	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
642	if (!sgt)
643		return NULL;
644
645	if (bo->pages) {
646		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
647					      0, gem->size, GFP_KERNEL) < 0)
648			goto free;
649	} else {
650		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
651				    gem->size) < 0)
652			goto free;
653	}
654
655	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
656		goto free;
657
658	return sgt;
659
660free:
661	sg_free_table(sgt);
662	kfree(sgt);
663	return NULL;
664}
665
666static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
667					  struct sg_table *sgt,
668					  enum dma_data_direction dir)
669{
670	struct drm_gem_object *gem = attach->dmabuf->priv;
671	struct tegra_bo *bo = to_tegra_bo(gem);
672
673	if (bo->pages)
674		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
675
676	sg_free_table(sgt);
677	kfree(sgt);
678}
679
680static void tegra_gem_prime_release(struct dma_buf *buf)
681{
682	drm_gem_dmabuf_release(buf);
683}
684
685static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
686					    enum dma_data_direction direction)
687{
688	struct drm_gem_object *gem = buf->priv;
689	struct tegra_bo *bo = to_tegra_bo(gem);
690	struct drm_device *drm = gem->dev;
691
692	if (bo->pages)
693		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
694
695	return 0;
696}
697
698static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
699					  enum dma_data_direction direction)
700{
701	struct drm_gem_object *gem = buf->priv;
702	struct tegra_bo *bo = to_tegra_bo(gem);
703	struct drm_device *drm = gem->dev;
704
705	if (bo->pages)
706		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
707
708	return 0;
709}
710
711static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
712{
713	struct drm_gem_object *gem = buf->priv;
714	int err;
715
716	err = drm_gem_mmap_obj(gem, gem->size, vma);
717	if (err < 0)
718		return err;
719
720	return __tegra_gem_mmap(gem, vma);
721}
722
723static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
724{
725	struct drm_gem_object *gem = buf->priv;
726	struct tegra_bo *bo = to_tegra_bo(gem);
727	void *vaddr;
728
729	vaddr = tegra_bo_mmap(&bo->base);
730	if (IS_ERR(vaddr))
731		return PTR_ERR(vaddr);
732
733	iosys_map_set_vaddr(map, vaddr);
734
735	return 0;
736}
737
738static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
739{
740	struct drm_gem_object *gem = buf->priv;
741	struct tegra_bo *bo = to_tegra_bo(gem);
742
743	tegra_bo_munmap(&bo->base, map->vaddr);
744}
745
746static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
747	.map_dma_buf = tegra_gem_prime_map_dma_buf,
748	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
749	.release = tegra_gem_prime_release,
750	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
751	.end_cpu_access = tegra_gem_prime_end_cpu_access,
752	.mmap = tegra_gem_prime_mmap,
753	.vmap = tegra_gem_prime_vmap,
754	.vunmap = tegra_gem_prime_vunmap,
755};
756
757struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
758				       int flags)
759{
760	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
761
762	exp_info.exp_name = KBUILD_MODNAME;
763	exp_info.owner = gem->dev->driver->fops->owner;
764	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
765	exp_info.size = gem->size;
766	exp_info.flags = flags;
767	exp_info.priv = gem;
768
769	return drm_gem_dmabuf_export(gem->dev, &exp_info);
770}
771
772struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
773					      struct dma_buf *buf)
774{
775	struct tegra_bo *bo;
776
777	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
778		struct drm_gem_object *gem = buf->priv;
779
780		if (gem->dev == drm) {
781			drm_gem_object_get(gem);
782			return gem;
783		}
784	}
785
786	bo = tegra_bo_import(drm, buf);
787	if (IS_ERR(bo))
788		return ERR_CAST(bo);
789
790	return &bo->gem;
791}
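tegra_gem_prime_import() short-circuits self-imports: a dma-buf that was exported by this same device is resolved back to the original GEM object instead of being attached again. A hedged sketch of the round trip with libdrm's PRIME helpers, reusing fd and args.handle from the create sketch:

int prime_fd;
uint32_t handle2;

/* Export the GEM handle as a dma-buf file descriptor ... */
if (drmPrimeHandleToFD(fd, args.handle, DRM_CLOEXEC, &prime_fd) < 0)
	return -1;

/* ... and re-import it; on the same device this takes the fast path
 * above and resolves to the existing object. */
if (drmPrimeFDToHandle(fd, prime_fd, &handle2) < 0)
	return -1;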
792
793struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
794{
795	struct drm_gem_object *gem;
796	struct tegra_bo *bo;
797
798	gem = drm_gem_object_lookup(file, handle);
799	if (!gem)
800		return NULL;
801
802	bo = to_tegra_bo(gem);
803	return &bo->base;
804}
drivers/gpu/drm/tegra/gem.c (v5.14.15, earlier revision of the same file for comparison)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * NVIDIA Tegra DRM GEM helper functions
  4 *
  5 * Copyright (C) 2012 Sascha Hauer, Pengutronix
  6 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
  7 *
  8 * Based on the GEM/CMA helpers
  9 *
 10 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 11 */
 12
 13#include <linux/dma-buf.h>
 14#include <linux/iommu.h>
 15
 16#include <drm/drm_drv.h>
 17#include <drm/drm_prime.h>
 18#include <drm/tegra_drm.h>
 19
 20#include "drm.h"
 21#include "gem.h"
 22
 23static void tegra_bo_put(struct host1x_bo *bo)
 24{
 25	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 26
 27	drm_gem_object_put(&obj->gem);
 28}
 29
 30/* XXX move this into lib/scatterlist.c? */
 31static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
 32				  unsigned int nents, gfp_t gfp_mask)
 33{
 34	struct scatterlist *dst;
 35	unsigned int i;
 36	int err;
 37
 38	err = sg_alloc_table(sgt, nents, gfp_mask);
 39	if (err < 0)
 40		return err;
 41
 42	dst = sgt->sgl;
 43
 44	for (i = 0; i < nents; i++) {
 45		sg_set_page(dst, sg_page(sg), sg->length, 0);
 46		dst = sg_next(dst);
 47		sg = sg_next(sg);
 48	}
 49
 50	return 0;
 51}
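sg_alloc_table_from_sg() exists because DMA-mapping an SG table writes the resulting bus addresses back into its entries; handing an importer's table straight to the DMA API for a second device would clobber the original mapping, so tegra_bo_pin() below duplicates the entries first. A hedged sketch of the intended use; "imported" and "dev" are placeholders:

/* Copy an imported SG table before mapping it for another device, so
 * the original dma_address/dma_length fields stay intact. */
struct sg_table copy;
int err;

err = sg_alloc_table_from_sg(&copy, imported->sgl, imported->orig_nents,
			     GFP_KERNEL);
if (err < 0)
	return err;

err = dma_map_sgtable(dev, &copy, DMA_TO_DEVICE, 0);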
 52
 53static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
 54				     dma_addr_t *phys)
 55{
 56	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 57	struct sg_table *sgt;
 58	int err;
 59
 60	/*
 61	 * If we've manually mapped the buffer object through the IOMMU, make
 62	 * sure to return the IOVA address of our mapping.
 63	 *
 64	 * Similarly, for buffers that have been allocated by the DMA API the
 65	 * physical address can be used for devices that are not attached to
 66	 * an IOMMU. For these devices, callers must pass a valid pointer via
 67	 * the @phys argument.
 68	 *
 69	 * Imported buffers were also already mapped at import time, so the
 70	 * existing mapping can be reused.
 71	 */
 72	if (phys) {
 73		*phys = obj->iova;
 74		return NULL;
 75	}
 76
 77	/*
 78	 * If we don't have a mapping for this buffer yet, return an SG table
 79	 * so that host1x can do the mapping for us via the DMA API.
 80	 */
 81	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 82	if (!sgt)
 83		return ERR_PTR(-ENOMEM);
 84
 85	if (obj->pages) {
 86		/*
 87		 * If the buffer object was allocated from the explicit IOMMU
 88		 * API code paths, construct an SG table from the pages.
 89		 */
 90		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
 91						0, obj->gem.size, GFP_KERNEL);
 92		if (err < 0)
 93			goto free;
 94	} else if (obj->sgt) {
 95		/*
 96		 * If the buffer object already has an SG table but no pages
 97		 * were allocated for it, it means the buffer was imported and
 98		 * the SG table needs to be copied to avoid overwriting any
 99		 * other potential users of the original SG table.
100		 */
101		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
102					     obj->sgt->orig_nents, GFP_KERNEL);
103		if (err < 0)
104			goto free;
105	} else {
106		/*
107		 * If the buffer object had no pages allocated and if it was
108		 * not imported, it had to be allocated with the DMA API, so
109		 * the DMA API helper can be used.
110		 */
111		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
112				      obj->gem.size);
113		if (err < 0)
114			goto free;
115	}
116
117	return sgt;
118
119free:
120	kfree(sgt);
121	return ERR_PTR(err);
122}
123
124static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
125{
126	if (sgt) {
127		sg_free_table(sgt);
128		kfree(sgt);
129	}
130}
131
132static void *tegra_bo_mmap(struct host1x_bo *bo)
133{
134	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
135	struct dma_buf_map map;
136	int ret;
137
138	if (obj->vaddr) {
139		return obj->vaddr;
140	} else if (obj->gem.import_attach) {
141		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
142		return ret ? NULL : map.vaddr;
143	} else {
144		return vmap(obj->pages, obj->num_pages, VM_MAP,
145			    pgprot_writecombine(PAGE_KERNEL));
146	}
147}
148
149static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
150{
151	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
152	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
153
154	if (obj->vaddr)
155		return;
156	else if (obj->gem.import_attach)
157		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
158	else
159		vunmap(addr);
160}
161
162static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
163{
164	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
165
166	drm_gem_object_get(&obj->gem);
167
168	return bo;
169}
170
171static const struct host1x_bo_ops tegra_bo_ops = {
172	.get = tegra_bo_get,
173	.put = tegra_bo_put,
174	.pin = tegra_bo_pin,
175	.unpin = tegra_bo_unpin,
176	.mmap = tegra_bo_mmap,
177	.munmap = tegra_bo_munmap,
178};
179
180static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
181{
182	int prot = IOMMU_READ | IOMMU_WRITE;
183	int err;
184
185	if (bo->mm)
186		return -EBUSY;
187
188	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
189	if (!bo->mm)
190		return -ENOMEM;
191
192	mutex_lock(&tegra->mm_lock);
193
194	err = drm_mm_insert_node_generic(&tegra->mm,
195					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
196	if (err < 0) {
197		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
198			err);
199		goto unlock;
200	}
201
202	bo->iova = bo->mm->start;
203
204	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
205	if (!bo->size) {
206		dev_err(tegra->drm->dev, "failed to map buffer\n");
207		err = -ENOMEM;
208		goto remove;
209	}
210
211	mutex_unlock(&tegra->mm_lock);
212
213	return 0;
214
215remove:
216	drm_mm_remove_node(bo->mm);
217unlock:
218	mutex_unlock(&tegra->mm_lock);
219	kfree(bo->mm);
220	return err;
221}
222
223static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
224{
225	if (!bo->mm)
226		return 0;
227
228	mutex_lock(&tegra->mm_lock);
229	iommu_unmap(tegra->domain, bo->iova, bo->size);
230	drm_mm_remove_node(bo->mm);
231	mutex_unlock(&tegra->mm_lock);
232
233	kfree(bo->mm);
234
235	return 0;
236}
237
238static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
239	.free = tegra_bo_free_object,
240	.export = tegra_gem_prime_export,
241	.vm_ops = &tegra_bo_vm_ops,
242};
243
244static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
245					      size_t size)
246{
247	struct tegra_bo *bo;
248	int err;
249
250	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
251	if (!bo)
252		return ERR_PTR(-ENOMEM);
253
254	bo->gem.funcs = &tegra_gem_object_funcs;
255
256	host1x_bo_init(&bo->base, &tegra_bo_ops);
257	size = round_up(size, PAGE_SIZE);
258
259	err = drm_gem_object_init(drm, &bo->gem, size);
260	if (err < 0)
261		goto free;
262
263	err = drm_gem_create_mmap_offset(&bo->gem);
264	if (err < 0)
265		goto release;
266
267	return bo;
268
269release:
270	drm_gem_object_release(&bo->gem);
271free:
272	kfree(bo);
273	return ERR_PTR(err);
274}
275
276static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
277{
278	if (bo->pages) {
279		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
280		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
281		sg_free_table(bo->sgt);
282		kfree(bo->sgt);
283	} else if (bo->vaddr) {
284		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
285	}
286}
287
288static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
289{
290	int err;
291
292	bo->pages = drm_gem_get_pages(&bo->gem);
293	if (IS_ERR(bo->pages))
294		return PTR_ERR(bo->pages);
295
296	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
297
298	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
299	if (IS_ERR(bo->sgt)) {
300		err = PTR_ERR(bo->sgt);
301		goto put_pages;
302	}
303
304	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
305	if (err)
306		goto free_sgt;
307
308	return 0;
309
310free_sgt:
311	sg_free_table(bo->sgt);
312	kfree(bo->sgt);
313put_pages:
314	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
315	return err;
316}
317
318static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
319{
320	struct tegra_drm *tegra = drm->dev_private;
321	int err;
322
323	if (tegra->domain) {
324		err = tegra_bo_get_pages(drm, bo);
325		if (err < 0)
326			return err;
327
328		err = tegra_bo_iommu_map(tegra, bo);
329		if (err < 0) {
330			tegra_bo_free(drm, bo);
331			return err;
332		}
333	} else {
334		size_t size = bo->gem.size;
335
336		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
337					 GFP_KERNEL | __GFP_NOWARN);
338		if (!bo->vaddr) {
339			dev_err(drm->dev,
340				"failed to allocate buffer of size %zu\n",
341				size);
342			return -ENOMEM;
343		}
344	}
345
346	return 0;
347}
348
349struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
350				 unsigned long flags)
351{
352	struct tegra_bo *bo;
353	int err;
354
355	bo = tegra_bo_alloc_object(drm, size);
356	if (IS_ERR(bo))
357		return bo;
358
359	err = tegra_bo_alloc(drm, bo);
360	if (err < 0)
361		goto release;
362
363	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
364		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
365
366	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
367		bo->flags |= TEGRA_BO_BOTTOM_UP;
368
369	return bo;
370
371release:
372	drm_gem_object_release(&bo->gem);
373	kfree(bo);
374	return ERR_PTR(err);
375}
376
377struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
378					     struct drm_device *drm,
379					     size_t size,
380					     unsigned long flags,
381					     u32 *handle)
382{
383	struct tegra_bo *bo;
384	int err;
385
386	bo = tegra_bo_create(drm, size, flags);
387	if (IS_ERR(bo))
388		return bo;
389
390	err = drm_gem_handle_create(file, &bo->gem, handle);
391	if (err) {
392		tegra_bo_free_object(&bo->gem);
393		return ERR_PTR(err);
394	}
395
396	drm_gem_object_put(&bo->gem);
397
398	return bo;
399}
400
401static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
402					struct dma_buf *buf)
403{
404	struct tegra_drm *tegra = drm->dev_private;
405	struct dma_buf_attachment *attach;
406	struct tegra_bo *bo;
407	int err;
408
409	bo = tegra_bo_alloc_object(drm, buf->size);
410	if (IS_ERR(bo))
411		return bo;
412
413	attach = dma_buf_attach(buf, drm->dev);
414	if (IS_ERR(attach)) {
415		err = PTR_ERR(attach);
416		goto free;
417	}
418
419	get_dma_buf(buf);
420
421	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
422	if (IS_ERR(bo->sgt)) {
423		err = PTR_ERR(bo->sgt);
424		goto detach;
425	}
426
427	if (tegra->domain) {
428		err = tegra_bo_iommu_map(tegra, bo);
429		if (err < 0)
430			goto detach;
431	}
432
433	bo->gem.import_attach = attach;
434
435	return bo;
436
437detach:
438	if (!IS_ERR_OR_NULL(bo->sgt))
439		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
440
441	dma_buf_detach(buf, attach);
442	dma_buf_put(buf);
443free:
444	drm_gem_object_release(&bo->gem);
445	kfree(bo);
446	return ERR_PTR(err);
447}
448
449void tegra_bo_free_object(struct drm_gem_object *gem)
450{
451	struct tegra_drm *tegra = gem->dev->dev_private;
452	struct tegra_bo *bo = to_tegra_bo(gem);
453
454	if (tegra->domain)
455		tegra_bo_iommu_unmap(tegra, bo);
456
457	if (gem->import_attach) {
458		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
459					 DMA_TO_DEVICE);
460		drm_prime_gem_destroy(gem, NULL);
461	} else {
462		tegra_bo_free(gem->dev, bo);
463	}
464
465	drm_gem_object_release(gem);
466	kfree(bo);
467}
468
469int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
470			 struct drm_mode_create_dumb *args)
471{
472	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
473	struct tegra_drm *tegra = drm->dev_private;
474	struct tegra_bo *bo;
475
476	args->pitch = round_up(min_pitch, tegra->pitch_align);
477	args->size = args->pitch * args->height;
478
479	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
480					 &args->handle);
481	if (IS_ERR(bo))
482		return PTR_ERR(bo);
483
484	return 0;
485}
486
487static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
488{
489	struct vm_area_struct *vma = vmf->vma;
490	struct drm_gem_object *gem = vma->vm_private_data;
491	struct tegra_bo *bo = to_tegra_bo(gem);
492	struct page *page;
493	pgoff_t offset;
494
495	if (!bo->pages)
496		return VM_FAULT_SIGBUS;
497
498	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
499	page = bo->pages[offset];
500
501	return vmf_insert_page(vma, vmf->address, page);
502}
503
504const struct vm_operations_struct tegra_bo_vm_ops = {
505	.fault = tegra_bo_fault,
506	.open = drm_gem_vm_open,
507	.close = drm_gem_vm_close,
508};
509
510int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
511{
512	struct tegra_bo *bo = to_tegra_bo(gem);
513
514	if (!bo->pages) {
515		unsigned long vm_pgoff = vma->vm_pgoff;
516		int err;
517
518		/*
519		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
520		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
521		 * to 0 as we want to map the whole buffer.
522		 */
523		vma->vm_flags &= ~VM_PFNMAP;
524		vma->vm_pgoff = 0;
525
526		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
527				  gem->size);
528		if (err < 0) {
529			drm_gem_vm_close(vma);
530			return err;
531		}
532
533		vma->vm_pgoff = vm_pgoff;
534	} else {
535		pgprot_t prot = vm_get_page_prot(vma->vm_flags);
536
537		vma->vm_flags |= VM_MIXEDMAP;
538		vma->vm_flags &= ~VM_PFNMAP;
539
540		vma->vm_page_prot = pgprot_writecombine(prot);
541	}
542
543	return 0;
544}
545
546int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
547{
548	struct drm_gem_object *gem;
549	int err;
550
551	err = drm_gem_mmap(file, vma);
552	if (err < 0)
553		return err;
554
555	gem = vma->vm_private_data;
556
557	return __tegra_gem_mmap(gem, vma);
558}
559
560static struct sg_table *
561tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
562			    enum dma_data_direction dir)
563{
564	struct drm_gem_object *gem = attach->dmabuf->priv;
565	struct tegra_bo *bo = to_tegra_bo(gem);
566	struct sg_table *sgt;
567
568	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
569	if (!sgt)
570		return NULL;
571
572	if (bo->pages) {
573		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
574					      0, gem->size, GFP_KERNEL) < 0)
575			goto free;
576	} else {
577		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
578				    gem->size) < 0)
579			goto free;
580	}
581
582	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
583		goto free;
584
585	return sgt;
586
587free:
588	sg_free_table(sgt);
589	kfree(sgt);
590	return NULL;
591}
592
593static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
594					  struct sg_table *sgt,
595					  enum dma_data_direction dir)
596{
597	struct drm_gem_object *gem = attach->dmabuf->priv;
598	struct tegra_bo *bo = to_tegra_bo(gem);
599
600	if (bo->pages)
601		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
602
603	sg_free_table(sgt);
604	kfree(sgt);
605}
606
607static void tegra_gem_prime_release(struct dma_buf *buf)
608{
609	drm_gem_dmabuf_release(buf);
610}
611
612static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
613					    enum dma_data_direction direction)
614{
615	struct drm_gem_object *gem = buf->priv;
616	struct tegra_bo *bo = to_tegra_bo(gem);
617	struct drm_device *drm = gem->dev;
618
619	if (bo->pages)
620		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
621
622	return 0;
623}
624
625static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
626					  enum dma_data_direction direction)
627{
628	struct drm_gem_object *gem = buf->priv;
629	struct tegra_bo *bo = to_tegra_bo(gem);
630	struct drm_device *drm = gem->dev;
631
632	if (bo->pages)
633		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
634
635	return 0;
636}
637
638static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
639{
640	struct drm_gem_object *gem = buf->priv;
641	int err;
642
643	err = drm_gem_mmap_obj(gem, gem->size, vma);
644	if (err < 0)
645		return err;
646
647	return __tegra_gem_mmap(gem, vma);
648}
649
650static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
651{
652	struct drm_gem_object *gem = buf->priv;
653	struct tegra_bo *bo = to_tegra_bo(gem);
654
655	dma_buf_map_set_vaddr(map, bo->vaddr);
656
657	return 0;
658}
659
660static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
661{
662}
663
664static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
665	.map_dma_buf = tegra_gem_prime_map_dma_buf,
666	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
667	.release = tegra_gem_prime_release,
668	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
669	.end_cpu_access = tegra_gem_prime_end_cpu_access,
670	.mmap = tegra_gem_prime_mmap,
671	.vmap = tegra_gem_prime_vmap,
672	.vunmap = tegra_gem_prime_vunmap,
673};
674
675struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
676				       int flags)
677{
678	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
679
680	exp_info.exp_name = KBUILD_MODNAME;
681	exp_info.owner = gem->dev->driver->fops->owner;
682	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
683	exp_info.size = gem->size;
684	exp_info.flags = flags;
685	exp_info.priv = gem;
686
687	return drm_gem_dmabuf_export(gem->dev, &exp_info);
688}
689
690struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
691					      struct dma_buf *buf)
692{
693	struct tegra_bo *bo;
694
695	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
696		struct drm_gem_object *gem = buf->priv;
697
698		if (gem->dev == drm) {
699			drm_gem_object_get(gem);
700			return gem;
701		}
702	}
703
704	bo = tegra_bo_import(drm, buf);
705	if (IS_ERR(bo))
706		return ERR_CAST(bo);
707
708	return &bo->gem;
709}