v6.8 (drivers/gpu/drm/exynos/exynos_drm_gem.c)
// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */


#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

MODULE_IMPORT_NS(DMA_BUF);

static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the allocation is made
	 * as physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

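The helper above maps the EXYNOS_BO_* flags onto generic DMA attributes: leaving EXYNOS_BO_NONCONTIG unset forces DMA_ATTR_FORCE_CONTIGUOUS, the cache flags select DMA_ATTR_WRITE_COMBINE, and kvmap controls DMA_ATTR_NO_KERNEL_MAPPING. For reference, a minimal sketch of the same generic dma-mapping calls from a hypothetical platform driver (example_alloc and its struct device are illustrative, not part of this file):

#include <linux/dma-mapping.h>

/* Sketch: allocate and free a forced-contiguous, write-combined buffer. */
static int example_alloc(struct device *dev, size_t size)
{
	unsigned long attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_WRITE_COMBINE;
	dma_addr_t dma_addr;
	void *cookie;

	cookie = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, attrs);
	if (!cookie)
		return -ENOMEM;

	/* dma_addr is handed to the device; cookie is the CPU-side handle. */

	dma_free_attrs(dev, size, cookie, dma_addr, attrs);
	return 0;
}
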
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered,
	 * and return it through *handle as the id userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.mmap = exynos_drm_gem_mmap,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

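Taken together, DRM_IOCTL_EXYNOS_GEM_CREATE and DRM_IOCTL_EXYNOS_GEM_MAP hand userspace a GEM handle and the fake mmap offset it needs. A minimal userspace sketch of that flow, assuming libdrm's drmIoctl() and the uAPI structs from include/uapi/drm/exynos_drm.h (the device node path is a placeholder and error handling is trimmed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/exynos_drm.h>

int main(void)
{
	struct drm_exynos_gem_create create = {
		.size  = 4096,
		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
	};
	struct drm_exynos_gem_map map = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder node */
	void *ptr;

	if (fd < 0)
		return 1;
	/* Serviced by exynos_drm_gem_create_ioctl() above. */
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create))
		return 1;
	/* Serviced by exynos_drm_gem_map_ioctl(); yields the fake offset. */
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP, &map))
		return 1;
	/* The offset routes this mmap() to the driver's GEM mmap path. */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, (off_t)map.offset);
	if (ptr == MAP_FAILED)
		return 1;
	memset(ptr, 0, create.size);
	munmap(ptr, create.size);
	close(fd);
	return 0;
}
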
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vm_flags_clear(vma, VM_PFNMAP);
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is invoked when a user application issues
	 *   the DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

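The pitch and size computed above are exactly what a dumb-buffer client reads back. A short sketch against the generic DRM uAPI (nothing exynos-specific; fd is an already-open DRM node and create_dumb is a hypothetical helper):

#include <stdint.h>
#include <xf86drm.h>

/* Sketch: request a 1920x1080, 32 bpp dumb buffer and read back its geometry. */
static int create_dumb(int fd, uint32_t *handle, uint32_t *pitch, uint64_t *size)
{
	struct drm_mode_create_dumb creq = {
		.width  = 1920,
		.height = 1080,
		.bpp    = 32,
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		return -1;

	/* Per the code above: pitch = 1920 * 4 and size = pitch * 1080. */
	*handle = creq.handle;
	*pitch = creq.pitch;
	*size = creq.size;
	return 0;
}
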
static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * Buffer has been mapped as contiguous into DMA address space,
	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
	 * We assume a simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}
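These helpers back the generic PRIME ioctls for this driver. A minimal sharing sketch using libdrm's wrappers (both file descriptors and the handle are placeholders):

#include <stdint.h>
#include <xf86drm.h>

/* Export a GEM handle as a dma-buf fd, then import it on another device;
 * the import side lands in exynos_drm_gem_prime_import() above. */
static int share_buffer(int export_fd, uint32_t handle,
			int import_fd, uint32_t *imported_handle)
{
	int dmabuf_fd;

	if (drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &dmabuf_fd))
		return -1;

	return drmPrimeFDToHandle(import_fd, dmabuf_fd, imported_handle);
}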
v4.17 (drivers/gpu/drm/exynos/exynos_drm_gem.c)
 
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the allocation is made
	 * as physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered,
	 * and return it through *handle as the id userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is invoked when a user application issues
	 *   the DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

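Unlike v6.8, where mmap is a drm_gem_object_funcs callback, at v4.17 exynos_drm_gem_mmap() was installed as the driver's file_operations handler. A simplified sketch of that wiring, not the verbatim exynos_drm_drv.c:

static const struct file_operations exynos_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= exynos_drm_gem_mmap,	/* the handler above */
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl	= drm_ioctl,
	.release	= drm_release,
};
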
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be either CONTIG or NONCONTIG, but for now
		 * we set NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}