// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */


#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

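/*
 * Allocate the DMA memory that backs a GEM object. The EXYNOS_BO_* flags
 * stored in the object select contiguous vs. non-contiguous memory and the
 * cache attributes, and a page array is built from the allocation so the
 * fault handler can insert individual pages into userspace mappings.
 */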
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise the memory is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combine
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
					   GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release the memory region of an imported buffer;
	 * the exporter releases it once the dmabuf's refcount
	 * becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

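/*
 * Common initialisation shared by the allocation and PRIME import paths:
 * allocate the wrapper, initialise the base GEM object and create the
 * fake mmap offset used by the map ioctl.
 */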
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

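/*
 * Allocate a new GEM object: validate the EXYNOS_BO_* flags, round the size
 * up to a page multiple, fall back to a contiguous buffer when no IOMMU is
 * available, and then allocate the backing DMA memory.
 */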
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

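/*
 * Handler for the EXYNOS_GEM_CREATE ioctl. A rough sketch of how userspace
 * is expected to reach this path (error handling omitted; field names are
 * those of struct drm_exynos_gem_create from the exynos uapi header):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = buffer_size,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *	// on success, req.handle names the new GEM object
 */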
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for a framebuffer.
	 * - this callback is called by userspace with the
	 *   DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

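/*
 * Page fault handler for mmap'ed GEM objects: look up the faulting page in
 * the object's page array and insert it into the user mapping.
 */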
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}

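/*
 * Apply the caching attributes requested at allocation time to the VMA
 * (cached, write-combined or non-cached) and map the buffer through
 * dma_mmap_attrs().
 */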
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

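/*
 * PRIME import: wrap an imported sg_table in a new GEM object, build the
 * page array from it and guess the CONTIG/NONCONTIG flag from the number
 * of entries in the table.
 */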
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* the memory is always physically contiguous if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be either CONTIG or NONCONTIG, but for now
		 * assume NONCONTIG.
		 * TODO: find a way for the exporter to tell the importer
		 * the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

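/*
 * The buffer is allocated with DMA_ATTR_NO_KERNEL_MAPPING, so there is no
 * kernel virtual address to hand out; vmap therefore returns NULL and
 * vunmap has nothing to undo.
 */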
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}

// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */


#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

MODULE_IMPORT_NS(DMA_BUF);

static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

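/*
 * Allocate the DMA memory that backs a GEM object. The EXYNOS_BO_* flags
 * select contiguous vs. non-contiguous memory and the cache attributes.
 * When @kvmap is true the buffer keeps a kernel mapping (needed for fbdev
 * emulation) and the DMA cookie doubles as the kernel virtual address.
 */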
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise the memory is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combine
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release the memory region of an imported buffer;
	 * the exporter releases it once the dmabuf's refcount
	 * becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.mmap = exynos_drm_gem_mmap,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

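/*
 * Allocate a new GEM object: validate the EXYNOS_BO_* flags, round the size
 * up to a page multiple, fall back to a contiguous buffer when no IOMMU is
 * available, and allocate the backing DMA memory (optionally with a kernel
 * mapping, see @kvmap).
 */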
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vm_flags_clear(vma, VM_PFNMAP);
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for a framebuffer.
	 * - this callback is called by userspace with the
	 *   DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

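/*
 * GEM object mmap callback (wired up through exynos_drm_gem_object_funcs):
 * imported buffers are forwarded to dma_buf_mmap(), native buffers get the
 * caching attributes requested at allocation time and are mapped through
 * dma_mmap_attrs() in exynos_drm_gem_mmap_buffer().
 */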
static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

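/*
 * PRIME export: build a scatter-gather table describing the buffer from the
 * DMA cookie so that importers can map it.
 */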
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

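/*
 * PRIME import: only buffers that are contiguous in DMA address space are
 * accepted, since the driver keeps a single dma_addr per object; the
 * CONTIG/NONCONTIG flag is then derived from IOMMU availability.
 */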
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * Buffer has been mapped as contiguous into DMA address space,
	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
	 * We assume a simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}