Linux v4.6: drivers/gpu/drm/tegra/gem.c

/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_unreference_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_reference(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
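
/*
 * Editorial note, not part of the original file: a minimal sketch of how a
 * host1x client consumes these ops through the host1x_bo wrappers declared
 * in <linux/host1x.h> at this kernel version. 'obj' is a hypothetical
 * struct tegra_bo; error handling is elided.
 *
 *	struct host1x_bo *ref = host1x_bo_get(&obj->base);
 *	struct sg_table *sgt = NULL;
 *	dma_addr_t phys;
 *
 *	phys = host1x_bo_pin(ref, &sgt);	// ends up in tegra_bo_pin()
 *	// ... program 'phys' into a command stream ...
 *	host1x_bo_unpin(ref, sgt);		// tegra_bo_unpin(): a no-op here
 *	host1x_bo_put(ref);			// drops the GEM reference
 */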

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}
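
/*
 * Editorial note, not part of the original file: the sg_dma_address()
 * assignments above exist only so the streaming DMA sync helpers can walk
 * the table. A caller touching these pages from the CPU would bracket its
 * accesses the same way, e.g.:
 *
 *	dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 *			    DMA_FROM_DEVICE);
 *	// ... CPU reads or writes ...
 *	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
 *			       DMA_TO_DEVICE);
 */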

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}
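
/*
 * Editorial note, not part of the original file: this is the helper behind
 * the driver's GEM-create ioctl. A sketch of the calling pattern, with a
 * hypothetical 'args' matching struct drm_tegra_gem_create from the UAPI:
 *
 *	struct tegra_bo *bo;
 *
 *	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
 *					 &args->handle);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * The helper has already dropped its local reference; the buffer stays
 * alive through the userspace handle it created.
 */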

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
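
The dumb-buffer path above is the easiest one to exercise from userspace. The
following sketch (an editorial addition, not from the kernel tree) creates a
dumb buffer, asks for its fake mmap offset and maps it, which ends up in
tegra_bo_dumb_create(), tegra_bo_dumb_map_offset() and tegra_drm_mmap()
respectively. It uses only standard DRM UAPI, assuming the libdrm UAPI
headers are on the include path; the device node path is an assumption for
illustration.

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <drm/drm.h>
	#include <drm/drm_mode.h>

	int example(void)
	{
		struct drm_mode_create_dumb create;
		struct drm_mode_destroy_dumb destroy;
		struct drm_mode_map_dumb map;
		void *ptr;
		int fd;

		fd = open("/dev/dri/card0", O_RDWR); /* assumed device node */
		if (fd < 0)
			return -1;

		memset(&create, 0, sizeof(create));
		create.width = 640;
		create.height = 480;
		create.bpp = 32;

		/* allocates via tegra_bo_create_with_handle() */
		if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0)
			goto err;

		memset(&map, 0, sizeof(map));
		map.handle = create.handle;

		/* returns the drm_vma_node offset for this BO */
		if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0)
			goto err;

		/* served by dma_mmap_wc() or, per page, tegra_bo_fault() */
		ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, map.offset);
		if (ptr == MAP_FAILED)
			goto err;

		munmap(ptr, create.size);

		memset(&destroy, 0, sizeof(destroy));
		destroy.handle = create.handle;
		ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);

		close(fd);
		return 0;

	err:
		close(fd);
		return -1;
	}
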
Linux v6.9.4: drivers/gpu/drm/tegra/gem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->gem.import_attach)
		return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);

	vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};
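
/*
 * Editorial note, not part of the original file: pinning here yields a
 * refcounted struct host1x_bo_mapping rather than a bare dma_addr_t as in
 * older kernels. A hypothetical caller, with error handling elided:
 *
 *	struct host1x_bo_mapping *map;
 *
 *	map = tegra_bo_pin(dev, &obj->base, DMA_TO_DEVICE);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	// ... use map->phys and map->size in the command stream ...
 *	tegra_bo_unpin(map);	// also drops the host1x_bo reference
 */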

static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}
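
/*
 * Editorial note, not part of the original file: splitting the VMA setup out
 * into __tegra_gem_mmap() lets two entry points share it:
 *
 *	tegra_drm_mmap():       drm_gem_mmap(),     then __tegra_gem_mmap()
 *	tegra_gem_prime_mmap(): drm_gem_mmap_obj(), then __tegra_gem_mmap()
 *
 * so an mmap() on the DRM device node and an mmap() on an exported dma-buf
 * fd end up with identical mappings.
 */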

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}
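
The PRIME paths above are driven from userspace through the generic DRM PRIME
ioctls. The sketch below (an editorial addition, not from the kernel tree)
exports a GEM handle as a dma-buf file descriptor, which lands in
tegra_gem_prime_export(), and re-imports it, which lands in
tegra_gem_prime_import(). 'fd' and 'handle' are assumed to come from earlier
setup such as the dumb-buffer example after the v4.6 listing, and the libdrm
UAPI headers are assumed to be on the include path.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	int export_and_reimport(int fd, uint32_t handle)
	{
		struct drm_prime_handle args;

		memset(&args, 0, sizeof(args));
		args.handle = handle;
		args.flags = DRM_CLOEXEC;

		/* drm_gem_dmabuf_export() via tegra_gem_prime_export() */
		if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
			return -1;

		/*
		 * Importing into the same device is the fast path in
		 * tegra_gem_prime_import(): the existing GEM object is
		 * referenced instead of attaching to the dma-buf.
		 */
		if (ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
			return -1;

		return args.handle == handle ? 0 : 1;
	}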