v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t ret;

        mutex_lock(&private->mm_lock);
        ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
                                         rk_obj->base.size, PAGE_SIZE,
                                         0, 0);
        mutex_unlock(&private->mm_lock);

        if (ret < 0) {
                DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
                return ret;
        }

        rk_obj->dma_addr = rk_obj->mm.start;

        ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
                           rk_obj->sgt->nents, prot);
        if (ret < rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
                ret = -ENOMEM;
                goto err_remove_node;
        }

        rk_obj->size = ret;

        return 0;

err_remove_node:
        mutex_lock(&private->mm_lock);
        drm_mm_remove_node(&rk_obj->mm);
        mutex_unlock(&private->mm_lock);

        return ret;
}
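
/*
 * rockchip_gem_iommu_map() works in two steps: a range of I/O virtual
 * address space is reserved from the driver-private drm_mm allocator
 * (under mm_lock), and the object's scatter-gather list is then mapped
 * at that address.  iommu_map_sg() returns the number of bytes it
 * actually mapped, so anything short of the full object size is turned
 * into -ENOMEM and the IOVA reservation is rolled back.
 */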

static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;

        iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

        mutex_lock(&private->mm_lock);

        drm_mm_remove_node(&rk_obj->mm);

        mutex_unlock(&private->mm_lock);

        return 0;
}

static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        int ret, i;
        struct scatterlist *s;

        rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
        if (IS_ERR(rk_obj->pages))
                return PTR_ERR(rk_obj->pages);

        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

        rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
                goto err_put_pages;
        }

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
                               DMA_TO_DEVICE);

        return 0;

err_put_pages:
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
        return ret;
}
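
/*
 * The pages above come from shmem via drm_gem_get_pages(): discrete
 * PAGE_SIZE pages rather than one physically contiguous block.  It is
 * the IOMMU mapping built in rockchip_gem_iommu_map() that makes the
 * buffer appear contiguous to the device.  base.size is page-aligned
 * by rockchip_gem_alloc_object(), so the num_pages shift is exact.
 */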

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
        sg_free_table(rk_obj->sgt);
        kfree(rk_obj->sgt);
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
                                    bool alloc_kmap)
{
        int ret;

        ret = rockchip_gem_get_pages(rk_obj);
        if (ret < 0)
                return ret;

        ret = rockchip_gem_iommu_map(rk_obj);
        if (ret < 0)
                goto err_free;

        if (alloc_kmap) {
                rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                                      pgprot_writecombine(PAGE_KERNEL));
                if (!rk_obj->kvaddr) {
                        DRM_ERROR("failed to vmap() buffer\n");
                        ret = -ENOMEM;
                        goto err_unmap;
                }
        }

        return 0;

err_unmap:
        rockchip_gem_iommu_unmap(rk_obj);
err_free:
        rockchip_gem_put_pages(rk_obj);

        return ret;
}

static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

        if (!alloc_kmap)
                rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

        rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
                                         &rk_obj->dma_addr, GFP_KERNEL,
                                         rk_obj->dma_attrs);
        if (!rk_obj->kvaddr) {
                DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
                return -ENOMEM;
        }

        return 0;
}
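
/*
 * DMA_ATTR_WRITE_COMBINE requests a write-combined (uncached) mapping,
 * a good fit for a write-mostly scanout buffer.  With
 * DMA_ATTR_NO_KERNEL_MAPPING set, the value dma_alloc_attrs() returns
 * is an opaque cookie rather than a usable kernel virtual address;
 * here it is only checked against NULL and later handed back to
 * dma_free_attrs()/dma_mmap_attrs(), which is exactly what those
 * attributes require.
 */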

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;

        if (private->domain)
                return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
        else
                return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
        vunmap(rk_obj->kvaddr);
        rockchip_gem_iommu_unmap(rk_obj);
        rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
                       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
        if (rk_obj->pages)
                rockchip_gem_free_iommu(rk_obj);
        else
                rockchip_gem_free_dma(rk_obj);
}

static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
                                              struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        unsigned int count = obj->size >> PAGE_SHIFT;
        unsigned long user_count = vma_pages(vma);

        if (user_count == 0)
                return -ENXIO;

        return vm_map_pages(vma, rk_obj->pages, count);
}
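
/*
 * vm_map_pages() inserts the object's pages into the user VMA and
 * already fails with -ENXIO if the VMA is larger than the buffer, so
 * only the zero-length case needs to be filtered out here.
 */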

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
                                            struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;

        return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
                              obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma)
{
        int ret;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        /*
         * We allocated a struct page table for rk_obj, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
        vma->vm_flags &= ~VM_PFNMAP;

        if (rk_obj->pages)
                ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
        else
                ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
                          struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret)
                return ret;

        return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /*
         * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
         * whole buffer from the start.
         */
        vma->vm_pgoff = 0;

        obj = vma->vm_private_data;

        return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
        drm_gem_object_release(&rk_obj->base);
        kfree(rk_obj);
}

struct rockchip_gem_object *
        rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;

        size = round_up(size, PAGE_SIZE);

        rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
        if (!rk_obj)
                return ERR_PTR(-ENOMEM);

        obj = &rk_obj->base;

        drm_gem_object_init(drm, obj, size);

        return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
                           bool alloc_kmap)
{
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, size);
        if (IS_ERR(rk_obj))
                return rk_obj;

        ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
        if (ret)
                goto err_free_rk_obj;

        return rk_obj;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}
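
/*
 * Illustrative sketch, not part of the original file: how a caller
 * inside the driver might use the allocator above when it needs a
 * CPU-visible buffer.  The 4 MiB size is made up; the ERR_PTR
 * convention matches rockchip_gem_create_object().
 *
 *	struct rockchip_gem_object *rk_obj;
 *
 *	rk_obj = rockchip_gem_create_object(drm, 4 << 20, true);
 *	if (IS_ERR(rk_obj))
 *		return PTR_ERR(rk_obj);
 *	memset(rk_obj->kvaddr, 0, rk_obj->base.size);
 *
 * With alloc_kmap == false, kvaddr must not be dereferenced: the IOMMU
 * path leaves it NULL and the DMA path may hand back an opaque cookie
 * (see rockchip_gem_alloc_dma() above).
 */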

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (obj->import_attach) {
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
                        dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
                                     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
                }
                drm_prime_gem_destroy(obj, rk_obj->sgt);
        } else {
                rockchip_gem_free_buf(rk_obj);
        }

        rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle for it
 *
 * returns a struct rockchip_gem_object* on success or an ERR_PTR value
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
                                struct drm_device *drm, unsigned int size,
                                unsigned int *handle)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;
        int ret;

        rk_obj = rockchip_gem_create_object(drm, size, false);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        obj = &rk_obj->base;

        /*
         * Allocate an id in the idr table, under which the object is
         * registered; the handle returned to userspace is that id.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put_unlocked(obj);

        return rk_obj;

err_handle_create:
        rockchip_gem_free_object(obj);

        return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required.  Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
                             struct drm_device *dev,
                             struct drm_mode_create_dumb *args)
{
        struct rockchip_gem_object *rk_obj;
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

        /*
         * Align to 64 bytes since Mali requires it.
         */
        args->pitch = ALIGN(min_pitch, 64);
        args->size = args->pitch * args->height;

        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle);

        return PTR_ERR_OR_ZERO(rk_obj);
}
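
/*
 * Worked example for the pitch/size computation above (values are
 * illustrative): width = 1366, bpp = 32 gives min_pitch = 5464 bytes,
 * which ALIGN() rounds up to 5504; with height = 768 the resulting
 * dumb-buffer size is 5504 * 768 = 4227072 bytes, which
 * rockchip_gem_alloc_object() then rounds up to whole pages.
 */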

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;
        struct sg_table *sgt;
        int ret;

        if (rk_obj->pages)
                return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
                                    rk_obj->dma_addr, obj->size,
                                    rk_obj->dma_attrs);
        if (ret) {
                DRM_ERROR("failed to allocate sgt, %d\n", ret);
                kfree(sgt);
                return ERR_PTR(ret);
        }

        return sgt;
}

static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
                                                     int count)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, count, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sg,
                          struct rockchip_gem_object *rk_obj)
{
        rk_obj->sgt = sg;
        return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
                        struct dma_buf_attachment *attach,
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
{
        int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
                               DMA_BIDIRECTIONAL);
        if (!count)
                return -EINVAL;

        if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
                dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
                             DMA_BIDIRECTIONAL);
                return -EINVAL;
        }

        rk_obj->dma_addr = sg_dma_address(sg->sgl);
        rk_obj->sgt = sg;
        return 0;
}
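
/*
 * Without an IOMMU the display controller needs one physically
 * contiguous block, so an imported dma-buf is only usable if its
 * mapped scatterlist collapses into a single contiguous DMA range.
 * rockchip_sg_get_contiguous_size() walks the mapped entries and adds
 * up lengths for as long as each entry starts exactly where the
 * previous one ended; if the running total falls short of the
 * dma-buf's size, the import is rejected.
 */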

struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sg)
{
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        if (private->domain)
                ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
        else
                ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

        if (ret < 0) {
                DRM_ERROR("failed to import sg table: %d\n", ret);
                goto err_free_rk_obj;
        }

        return &rk_obj->base;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}

void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->pages)
                return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));

        if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return NULL;

        return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->pages) {
                vunmap(vaddr);
                return;
        }

        /* Nothing to do if allocated by DMA mapping API. */
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t ret;

        mutex_lock(&private->mm_lock);
        ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
                                         rk_obj->base.size, PAGE_SIZE,
                                         0, 0);
        mutex_unlock(&private->mm_lock);

        if (ret < 0) {
                DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
                return ret;
        }

        rk_obj->dma_addr = rk_obj->mm.start;

        ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
                                prot);
        if (ret < rk_obj->base.size) {
                DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
                          ret, rk_obj->base.size);
                ret = -ENOMEM;
                goto err_remove_node;
        }

        rk_obj->size = ret;

        return 0;

err_remove_node:
        mutex_lock(&private->mm_lock);
        drm_mm_remove_node(&rk_obj->mm);
        mutex_unlock(&private->mm_lock);

        return ret;
}
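
/*
 * Relative to the v5.4 code above, the raw iommu_map_sg(sgl, nents)
 * call has become iommu_map_sgtable(), which takes the sg_table
 * directly; the return-value convention (number of bytes mapped) and
 * the short-mapping check are unchanged.
 */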

static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        struct rockchip_drm_private *private = drm->dev_private;

        iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

        mutex_lock(&private->mm_lock);

        drm_mm_remove_node(&rk_obj->mm);

        mutex_unlock(&private->mm_lock);

        return 0;
}

static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
        struct drm_device *drm = rk_obj->base.dev;
        int ret, i;
        struct scatterlist *s;

        rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
        if (IS_ERR(rk_obj->pages))
                return PTR_ERR(rk_obj->pages);

        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

        rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
                                            rk_obj->pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
                goto err_put_pages;
        }

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sgtable_sg(rk_obj->sgt, s, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

        return 0;

err_put_pages:
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
        return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
        sg_free_table(rk_obj->sgt);
        kfree(rk_obj->sgt);
        drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
                                    bool alloc_kmap)
{
        int ret;

        ret = rockchip_gem_get_pages(rk_obj);
        if (ret < 0)
                return ret;

        ret = rockchip_gem_iommu_map(rk_obj);
        if (ret < 0)
                goto err_free;

        if (alloc_kmap) {
                rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                                      pgprot_writecombine(PAGE_KERNEL));
                if (!rk_obj->kvaddr) {
                        DRM_ERROR("failed to vmap() buffer\n");
                        ret = -ENOMEM;
                        goto err_unmap;
                }
        }

        return 0;

err_unmap:
        rockchip_gem_iommu_unmap(rk_obj);
err_free:
        rockchip_gem_put_pages(rk_obj);

        return ret;
}

static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

        if (!alloc_kmap)
                rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

        rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
                                         &rk_obj->dma_addr, GFP_KERNEL,
                                         rk_obj->dma_attrs);
        if (!rk_obj->kvaddr) {
                DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
                return -ENOMEM;
        }

        return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;

        if (private->domain)
                return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
        else
                return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
        vunmap(rk_obj->kvaddr);
        rockchip_gem_iommu_unmap(rk_obj);
        rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
        struct drm_gem_object *obj = &rk_obj->base;
        struct drm_device *drm = obj->dev;

        dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
                       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
        if (rk_obj->pages)
                rockchip_gem_free_iommu(rk_obj);
        else
                rockchip_gem_free_dma(rk_obj);
}

static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
                                              struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        unsigned int count = obj->size >> PAGE_SHIFT;
        unsigned long user_count = vma_pages(vma);

        if (user_count == 0)
                return -ENXIO;

        return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
                                            struct vm_area_struct *vma)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;

        return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
                              obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma)
{
        int ret;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        /*
         * We allocated a struct page table for rk_obj, so clear
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
        vma->vm_flags &= ~VM_PFNMAP;

        if (rk_obj->pages)
                ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
        else
                ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
                          struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        if (ret)
                return ret;

        return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /*
         * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
         * whole buffer from the start.
         */
        vma->vm_pgoff = 0;

        obj = vma->vm_private_data;

        return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
        drm_gem_object_release(&rk_obj->base);
        kfree(rk_obj);
}

static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
        .free = rockchip_gem_free_object,
        .get_sg_table = rockchip_gem_prime_get_sg_table,
        .vmap = rockchip_gem_prime_vmap,
        .vunmap = rockchip_gem_prime_vunmap,
        .vm_ops = &drm_gem_cma_vm_ops,
};
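
/*
 * New relative to v5.4 above: the per-driver callbacks (such as
 * (struct drm_driver)->gem_free_object_unlocked) have moved into a
 * per-object drm_gem_object_funcs table, wired up via obj->funcs in
 * rockchip_gem_alloc_object() below.  drm_gem_cma_vm_ops supplies the
 * generic open/close vm operations for the mmap path.
 */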

static struct rockchip_gem_object *
        rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;

        size = round_up(size, PAGE_SIZE);

        rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
        if (!rk_obj)
                return ERR_PTR(-ENOMEM);

        obj = &rk_obj->base;

        obj->funcs = &rockchip_gem_object_funcs;

        drm_gem_object_init(drm, obj, size);

        return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
                           bool alloc_kmap)
{
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, size);
        if (IS_ERR(rk_obj))
                return rk_obj;

        ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
        if (ret)
                goto err_free_rk_obj;

        return rk_obj;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *drm = obj->dev;
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (obj->import_attach) {
                if (private->domain) {
                        rockchip_gem_iommu_unmap(rk_obj);
                } else {
                        dma_unmap_sgtable(drm->dev, rk_obj->sgt,
                                          DMA_BIDIRECTIONAL, 0);
                }
                drm_prime_gem_destroy(obj, rk_obj->sgt);
        } else {
                rockchip_gem_free_buf(rk_obj);
        }

        rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle for it
 *
 * returns a struct rockchip_gem_object* on success or an ERR_PTR value
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
                                struct drm_device *drm, unsigned int size,
                                unsigned int *handle)
{
        struct rockchip_gem_object *rk_obj;
        struct drm_gem_object *obj;
        int ret;

        rk_obj = rockchip_gem_create_object(drm, size, false);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        obj = &rk_obj->base;

        /*
         * Allocate an id in the idr table, under which the object is
         * registered; the handle returned to userspace is that id.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                goto err_handle_create;

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put(obj);

        return rk_obj;

err_handle_create:
        rockchip_gem_free_object(obj);

        return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required.  Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
                             struct drm_device *dev,
                             struct drm_mode_create_dumb *args)
{
        struct rockchip_gem_object *rk_obj;
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

        /*
         * Align to 64 bytes since Mali requires it.
         */
        args->pitch = ALIGN(min_pitch, 64);
        args->size = args->pitch * args->height;

        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle);

        return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;
        struct sg_table *sgt;
        int ret;

        if (rk_obj->pages)
                return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
                                    rk_obj->dma_addr, obj->size,
                                    rk_obj->dma_attrs);
        if (ret) {
                DRM_ERROR("failed to allocate sgt, %d\n", ret);
                kfree(sgt);
                return ERR_PTR(ret);
        }

        return sgt;
}

static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
                          struct dma_buf_attachment *attach,
                          struct sg_table *sg,
                          struct rockchip_gem_object *rk_obj)
{
        rk_obj->sgt = sg;
        return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
                        struct dma_buf_attachment *attach,
                        struct sg_table *sg,
                        struct rockchip_gem_object *rk_obj)
{
        int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

        if (err)
                return err;

        if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
                DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
                dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
                return -EINVAL;
        }

        rk_obj->dma_addr = sg_dma_address(sg->sgl);
        rk_obj->sgt = sg;
        return 0;
}
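
/*
 * Relative to v5.4, dma_map_sg()/dma_unmap_sg() have been replaced by
 * the sgtable variants, which return 0 or a negative errno instead of
 * a mapped-entry count, and the local contiguity helper has been
 * dropped in favour of the shared drm_prime_get_contiguous_size().
 */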

struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sg)
{
        struct rockchip_drm_private *private = drm->dev_private;
        struct rockchip_gem_object *rk_obj;
        int ret;

        rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
        if (IS_ERR(rk_obj))
                return ERR_CAST(rk_obj);

        if (private->domain)
                ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
        else
                ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

        if (ret < 0) {
                DRM_ERROR("failed to import sg table: %d\n", ret);
                goto err_free_rk_obj;
        }

        return &rk_obj->base;

err_free_rk_obj:
        rockchip_gem_release_object(rk_obj);
        return ERR_PTR(ret);
}

int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->pages) {
                void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
                                   pgprot_writecombine(PAGE_KERNEL));
                if (!vaddr)
                        return -ENOMEM;
                dma_buf_map_set_vaddr(map, vaddr);
                return 0;
        }

        if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return -ENOMEM;
        dma_buf_map_set_vaddr(map, rk_obj->kvaddr);

        return 0;
}
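
/*
 * The vmap/vunmap interface also changed between these versions: the
 * v5.4 hooks returned and took a bare void *, while these return an
 * int and fill in a struct dma_buf_map, which records whether the
 * mapping lives in system or I/O memory.  The NO_KERNEL_MAPPING case
 * is now reported as -ENOMEM instead of a NULL pointer.
 */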

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

        if (rk_obj->pages) {
                vunmap(map->vaddr);
                return;
        }

        /* Nothing to do if allocated by DMA mapping API. */
}