v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}
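/*
 * Note: the copy above duplicates only the pages and lengths of the source
 * chain and resets every offset to 0, so the new table is independent of the
 * original and can be mapped without disturbing other users of the source
 * SG table.
 */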

static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}
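/*
 * A sketch (not part of this file, assuming the v5.9 host1x API) of how a
 * host1x client consumes the two cases above: with a valid @phys pointer it
 * receives the IOVA directly; otherwise it maps the returned SG table itself
 * via the DMA API, roughly:
 *
 *	sgt = host1x_bo_pin(dev, bo, NULL);
 *	if (!IS_ERR_OR_NULL(sgt))
 *		dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */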

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}
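/*
 * drm_mm only hands out a page-aligned IOVA range here; iommu_map_sg() then
 * populates the domain with the buffer's pages and returns the number of
 * bytes actually mapped, which is remembered in bo->size so that
 * tegra_bo_iommu_unmap() below can undo exactly that mapping.
 */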

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
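/*
 * Usage sketch (hypothetical caller, not part of this file): allocating a
 * page-sized buffer that is scanned out bottom-up would look roughly like:
 *
 *	struct tegra_bo *bo;
 *
 *	bo = tegra_bo_create(drm, 4096, DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */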

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}
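/*
 * Note the reference handling above: drm_gem_handle_create() takes its own
 * reference for the handle, so the creation reference is dropped before
 * returning. The returned pointer is therefore only kept alive by the
 * userspace handle.
 */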

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
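/*
 * Worked example (with a hypothetical pitch_align of 64): a 1920x1080 dumb
 * buffer at 32 bpp gives min_pitch = 1920 * 32 / 8 = 7680 bytes, which is
 * already a multiple of 64, so args->pitch = 7680 and
 * args->size = 7680 * 1080 = 8294400 bytes.
 */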

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
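/*
 * Two mmap strategies meet here: DMA API allocations are mapped up front in
 * one go via dma_mmap_wc(), while page-backed (IOMMU) allocations are left
 * unpopulated and filled lazily, one page per fault, by tegra_bo_fault()
 * above.
 */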

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
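For comparison, the v6.2 version of the same file follows. The visible differences include: the pin/unpin interface now produces reference-counted struct host1x_bo_mapping objects instead of bare SG tables, dma_map_sg()/dma_unmap_sg() calls are replaced by the dma_map_sgtable() family, vmap/vunmap go through struct iosys_map, the DMA-BUF entry points use the _unlocked wrappers, GEM callbacks are supplied through a drm_gem_object_funcs table, and MODULE_IMPORT_NS(DMA_BUF) is declared so the dma-buf symbols can be used.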
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}
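/*
 * Worked example: a mapped table with the entries (0x1000, len 0x1000) and
 * (0x2000, len 0x1000) counts as a single chunk, because the second entry
 * starts exactly where the first one ends; if the second entry instead
 * started at 0x8000, the count would be 2.
 */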

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}
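/*
 * Each pin produces a refcounted struct host1x_bo_mapping that holds its own
 * reference on the underlying BO (via host1x_bo_get() above);
 * tegra_bo_unpin() below releases the DMA or DMA-BUF mapping and drops that
 * reference again.
 */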

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
362static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
363{
364	struct tegra_drm *tegra = drm->dev_private;
365	int err;
366
367	if (tegra->domain) {
368		err = tegra_bo_get_pages(drm, bo);
369		if (err < 0)
370			return err;
371
372		err = tegra_bo_iommu_map(tegra, bo);
373		if (err < 0) {
374			tegra_bo_free(drm, bo);
375			return err;
376		}
377	} else {
378		size_t size = bo->gem.size;
379
380		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
381					 GFP_KERNEL | __GFP_NOWARN);
382		if (!bo->vaddr) {
383			dev_err(drm->dev,
384				"failed to allocate buffer of size %zu\n",
385				size);
386			return -ENOMEM;
387		}
388	}
389
390	return 0;
391}
392
393struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
394				 unsigned long flags)
395{
396	struct tegra_bo *bo;
397	int err;
398
399	bo = tegra_bo_alloc_object(drm, size);
400	if (IS_ERR(bo))
401		return bo;
402
403	err = tegra_bo_alloc(drm, bo);
404	if (err < 0)
405		goto release;
406
407	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
408		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
409
410	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
411		bo->flags |= TEGRA_BO_BOTTOM_UP;
412
413	return bo;
414
415release:
416	drm_gem_object_release(&bo->gem);
417	kfree(bo);
418	return ERR_PTR(err);
419}
420
421struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
422					     struct drm_device *drm,
423					     size_t size,
424					     unsigned long flags,
425					     u32 *handle)
426{
427	struct tegra_bo *bo;
428	int err;
429
430	bo = tegra_bo_create(drm, size, flags);
431	if (IS_ERR(bo))
432		return bo;
433
434	err = drm_gem_handle_create(file, &bo->gem, handle);
435	if (err) {
436		tegra_bo_free_object(&bo->gem);
437		return ERR_PTR(err);
438	}
439
440	drm_gem_object_put(&bo->gem);
441
442	return bo;
443}
444
445static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
446					struct dma_buf *buf)
447{
448	struct tegra_drm *tegra = drm->dev_private;
449	struct dma_buf_attachment *attach;
450	struct tegra_bo *bo;
451	int err;
452
453	bo = tegra_bo_alloc_object(drm, buf->size);
454	if (IS_ERR(bo))
455		return bo;
456
457	attach = dma_buf_attach(buf, drm->dev);
458	if (IS_ERR(attach)) {
459		err = PTR_ERR(attach);
460		goto free;
461	}
462
463	get_dma_buf(buf);
464
465	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
466	if (IS_ERR(bo->sgt)) {
467		err = PTR_ERR(bo->sgt);
468		goto detach;
469	}
470
471	if (tegra->domain) {
472		err = tegra_bo_iommu_map(tegra, bo);
473		if (err < 0)
474			goto detach;
475	}
476
477	bo->gem.import_attach = attach;
478
479	return bo;
480
481detach:
482	if (!IS_ERR_OR_NULL(bo->sgt))
483		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
484
485	dma_buf_detach(buf, attach);
486	dma_buf_put(buf);
487free:
488	drm_gem_object_release(&bo->gem);
489	kfree(bo);
490	return ERR_PTR(err);
491}
492
493void tegra_bo_free_object(struct drm_gem_object *gem)
494{
495	struct tegra_drm *tegra = gem->dev->dev_private;
496	struct host1x_bo_mapping *mapping, *tmp;
497	struct tegra_bo *bo = to_tegra_bo(gem);
498
499	/* remove all mappings of this buffer object from any caches */
500	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
501		if (mapping->cache)
502			host1x_bo_unpin(mapping);
503		else
504			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
505				dev_name(mapping->dev));
506	}
507
508	if (tegra->domain)
509		tegra_bo_iommu_unmap(tegra, bo);
510
511	if (gem->import_attach) {
512		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
513						  DMA_TO_DEVICE);
514		drm_prime_gem_destroy(gem, NULL);
515	} else {
516		tegra_bo_free(gem->dev, bo);
517	}
518
519	drm_gem_object_release(gem);
520	kfree(bo);
521}
522
523int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
524			 struct drm_mode_create_dumb *args)
525{
526	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
527	struct tegra_drm *tegra = drm->dev_private;
528	struct tegra_bo *bo;
529
530	args->pitch = round_up(min_pitch, tegra->pitch_align);
531	args->size = args->pitch * args->height;
532
533	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
534					 &args->handle);
535	if (IS_ERR(bo))
536		return PTR_ERR(bo);
537
538	return 0;
539}
540
541static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
542{
543	struct vm_area_struct *vma = vmf->vma;
544	struct drm_gem_object *gem = vma->vm_private_data;
545	struct tegra_bo *bo = to_tegra_bo(gem);
546	struct page *page;
547	pgoff_t offset;
548
549	if (!bo->pages)
550		return VM_FAULT_SIGBUS;
551
552	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
553	page = bo->pages[offset];
554
555	return vmf_insert_page(vma, vmf->address, page);
556}
557
558const struct vm_operations_struct tegra_bo_vm_ops = {
559	.fault = tegra_bo_fault,
560	.open = drm_gem_vm_open,
561	.close = drm_gem_vm_close,
562};
563
564int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
565{
566	struct tegra_bo *bo = to_tegra_bo(gem);
567
568	if (!bo->pages) {
569		unsigned long vm_pgoff = vma->vm_pgoff;
570		int err;
571
572		/*
573		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
574		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
575		 * to 0 as we want to map the whole buffer.
576		 */
577		vma->vm_flags &= ~VM_PFNMAP;
578		vma->vm_pgoff = 0;
579
580		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
581				  gem->size);
582		if (err < 0) {
583			drm_gem_vm_close(vma);
584			return err;
585		}
586
587		vma->vm_pgoff = vm_pgoff;
588	} else {
589		pgprot_t prot = vm_get_page_prot(vma->vm_flags);
590
591		vma->vm_flags |= VM_MIXEDMAP;
592		vma->vm_flags &= ~VM_PFNMAP;
593
594		vma->vm_page_prot = pgprot_writecombine(prot);
595	}
596
597	return 0;
598}
599
600int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
601{
602	struct drm_gem_object *gem;
603	int err;
604
605	err = drm_gem_mmap(file, vma);
606	if (err < 0)
607		return err;
608
609	gem = vma->vm_private_data;
610
611	return __tegra_gem_mmap(gem, vma);
612}
613
614static struct sg_table *
615tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
616			    enum dma_data_direction dir)
617{
618	struct drm_gem_object *gem = attach->dmabuf->priv;
619	struct tegra_bo *bo = to_tegra_bo(gem);
620	struct sg_table *sgt;
621
622	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
623	if (!sgt)
624		return NULL;
625
626	if (bo->pages) {
627		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
628					      0, gem->size, GFP_KERNEL) < 0)
629			goto free;
630	} else {
631		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
632				    gem->size) < 0)
633			goto free;
634	}
635
636	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
637		goto free;
638
639	return sgt;
640
641free:
642	sg_free_table(sgt);
643	kfree(sgt);
644	return NULL;
645}
646
647static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
648					  struct sg_table *sgt,
649					  enum dma_data_direction dir)
650{
651	struct drm_gem_object *gem = attach->dmabuf->priv;
652	struct tegra_bo *bo = to_tegra_bo(gem);
653
654	if (bo->pages)
655		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
656
657	sg_free_table(sgt);
658	kfree(sgt);
659}
660
661static void tegra_gem_prime_release(struct dma_buf *buf)
662{
663	drm_gem_dmabuf_release(buf);
664}
665
666static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
667					    enum dma_data_direction direction)
668{
669	struct drm_gem_object *gem = buf->priv;
670	struct tegra_bo *bo = to_tegra_bo(gem);
671	struct drm_device *drm = gem->dev;
672
673	if (bo->pages)
674		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
 
675
676	return 0;
677}
678
679static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
680					  enum dma_data_direction direction)
681{
682	struct drm_gem_object *gem = buf->priv;
683	struct tegra_bo *bo = to_tegra_bo(gem);
684	struct drm_device *drm = gem->dev;
685
686	if (bo->pages)
687		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
 
688
689	return 0;
690}
691
692static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
693{
694	struct drm_gem_object *gem = buf->priv;
695	int err;
696
697	dma_resv_assert_held(buf->resv);
698
699	err = drm_gem_mmap_obj(gem, gem->size, vma);
700	if (err < 0)
701		return err;
702
703	return __tegra_gem_mmap(gem, vma);
704}
705
706static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
707{
708	struct drm_gem_object *gem = buf->priv;
709	struct tegra_bo *bo = to_tegra_bo(gem);
710	void *vaddr;
711
712	vaddr = tegra_bo_mmap(&bo->base);
713	if (IS_ERR(vaddr))
714		return PTR_ERR(vaddr);
715
716	iosys_map_set_vaddr(map, vaddr);
717
718	return 0;
719}
720
721static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
722{
723	struct drm_gem_object *gem = buf->priv;
724	struct tegra_bo *bo = to_tegra_bo(gem);
725
726	tegra_bo_munmap(&bo->base, map->vaddr);
727}
728
729static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
730	.map_dma_buf = tegra_gem_prime_map_dma_buf,
731	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
732	.release = tegra_gem_prime_release,
733	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
734	.end_cpu_access = tegra_gem_prime_end_cpu_access,
735	.mmap = tegra_gem_prime_mmap,
736	.vmap = tegra_gem_prime_vmap,
737	.vunmap = tegra_gem_prime_vunmap,
738};
739
740struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
741				       int flags)
742{
743	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
744
745	exp_info.exp_name = KBUILD_MODNAME;
746	exp_info.owner = gem->dev->driver->fops->owner;
747	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
748	exp_info.size = gem->size;
749	exp_info.flags = flags;
750	exp_info.priv = gem;
751
752	return drm_gem_dmabuf_export(gem->dev, &exp_info);
753}
754
755struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
756					      struct dma_buf *buf)
757{
758	struct tegra_bo *bo;
759
760	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
761		struct drm_gem_object *gem = buf->priv;
762
763		if (gem->dev == drm) {
764			drm_gem_object_get(gem);
765			return gem;
766		}
767	}
768
769	bo = tegra_bo_import(drm, buf);
770	if (IS_ERR(bo))
771		return ERR_CAST(bo);
772
773	return &bo->gem;
774}
775
776struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
777{
778	struct drm_gem_object *gem;
779	struct tegra_bo *bo;
780
781	gem = drm_gem_object_lookup(file, handle);
782	if (!gem)
783		return NULL;
784
785	bo = to_tegra_bo(gem);
786	return &bo->base;
787}
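/*
 * tegra_gem_lookup() is new relative to the v5.9 version above: it resolves
 * a userspace handle to the embedded host1x_bo. drm_gem_object_lookup()
 * takes a reference on the GEM object, which the caller is expected to drop
 * again through host1x_bo_put() (i.e. tegra_bo_put() above).
 */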