/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

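/*
 * exynos_drm_alloc_buf - allocate the backing storage for a GEM object.
 *
 * Translates the EXYNOS_BO_* flags into DMA attributes, allocates the
 * buffer with dma_alloc_attrs() and fills exynos_gem->pages from the
 * scatter-gather table so the fault handler can map individual pages.
 */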
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is only as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);

	return ret;
}

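/*
 * exynos_drm_free_buf - release the backing storage allocated by
 * exynos_drm_alloc_buf(), including the pages array.
 */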
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}

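/*
 * exynos_drm_gem_handle_create - create a userspace handle for @obj and
 * drop the extra reference taken at allocation time, so that the handle
 * holds the remaining reference.
 */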
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

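/*
 * exynos_drm_gem_init - allocate and initialize a bare exynos_drm_gem
 * object of @size bytes and create its mmap offset. The backing buffer
 * itself is allocated separately by exynos_drm_alloc_buf().
 */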
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem;
}

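/*
 * exynos_drm_gem_create - validate the buffer flags and size, round the
 * size up to a page multiple and allocate both the GEM object and its
 * backing buffer.
 */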
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
					      &args->offset);
}

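/*
 * exynos_drm_gem_get_dma_addr - look up a GEM object by handle and return
 * a pointer to its DMA address. The lookup takes a reference which is
 * only released by the matching exynos_drm_gem_put_dma_addr() call below.
 */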
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer.
	 * This callback is invoked when userspace issues the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/*
	 * Get the offset of the memory allocated for a drm framebuffer.
	 * This callback is invoked when userspace issues the
	 * DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

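/*
 * exynos_drm_gem_fault - fault handler for mmap'ed GEM objects; inserts
 * the page backing the faulting address from the pages array built at
 * allocation or import time.
 */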
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

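/*
 * exynos_drm_gem_mmap_obj - apply the cache attributes requested in the
 * buffer flags to the VMA and map the buffer through dma_mmap_attrs().
 */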
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for now
		 * it is marked NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}

// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

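/*
 * exynos_drm_alloc_buf - allocate the backing storage for a GEM object.
 *
 * Translates the EXYNOS_BO_* flags into DMA attributes and allocates the
 * buffer with dma_alloc_attrs(). When @kvmap is set the allocation keeps
 * a kernel mapping and its address is stored in exynos_gem->kvaddr.
 */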
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is only as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

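/*
 * GEM object callbacks: freeing the object, exporting its backing storage
 * as an sg_table for PRIME, and the vm_ops used for userspace mappings.
 */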
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

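/*
 * exynos_drm_gem_create - validate the buffer flags and size, round the
 * size up to a page multiple and allocate the GEM object together with
 * its backing buffer. Without an IOMMU, non-contiguous requests are
 * downgraded to contiguous allocations with a warning.
 */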
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

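/*
 * exynos_drm_gem_get - look up a GEM object by handle. The returned
 * object carries a reference that the caller must drop when done.
 */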
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer.
	 * This callback is invoked when userspace issues the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

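/*
 * exynos_drm_gem_prime_get_sg_table - build an sg_table describing the
 * DMA buffer so it can be handed to a PRIME importer.
 */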
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

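/*
 * exynos_drm_gem_prime_import_sg_table - wrap an imported PRIME buffer in
 * a GEM object. The buffer must already be mapped contiguously into the
 * device's DMA address space.
 */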
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * Buffer has been mapped as contiguous into DMA address space,
	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
	 * We assume a simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}