// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

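/*
 * rockchip_gem_iommu_map - map the object's backing pages into the IOMMU
 *
 * Reserve an I/O virtual address range from the driver's drm_mm allocator,
 * then map the object's scatter-gather table at that address in the shared
 * IOMMU domain. On success, rk_obj->dma_addr holds the device-visible IOVA.
 */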
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < (ssize_t)rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

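/*
 * rockchip_gem_iommu_unmap - undo rockchip_gem_iommu_map(): unmap the IOVA
 * range from the IOMMU domain and return it to the drm_mm allocator.
 */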
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

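/*
 * rockchip_gem_get_pages - allocate shmem-backed pages for the object and
 * build a scatter-gather table over them, flushing the pages so the device
 * observes their contents.
 */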
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

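/*
 * rockchip_gem_alloc_iommu - back the object with (possibly discontiguous)
 * pages and map them through the IOMMU. If alloc_kmap is set, also create a
 * write-combined kernel virtual mapping of the pages.
 */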
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

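/*
 * rockchip_gem_alloc_dma - fallback for devices without an IOMMU: allocate a
 * physically contiguous, write-combined buffer from the DMA mapping API.
 * Without alloc_kmap, DMA_ATTR_NO_KERNEL_MAPPING is requested and kvaddr is
 * only an opaque cookie for dma_mmap_attrs()/dma_free_attrs().
 */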
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}

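/*
 * rockchip_gem_alloc_buf - choose the backing-storage strategy: shmem pages
 * behind the IOMMU when a domain is available, contiguous DMA memory
 * otherwise.
 */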
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

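/*
 * rockchip_drm_gem_object_mmap_iommu - map a page-backed object into
 * userspace by inserting its individual pages into the VMA.
 */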
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

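/*
 * rockchip_drm_gem_object_mmap - (struct drm_gem_object_funcs)->mmap
 * callback function, reached via drm_gem_mmap() once the fake offset has
 * been validated; dispatches to the IOMMU or DMA variant above.
 */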
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);

	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	return ret;
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap = rockchip_gem_prime_vunmap,
	.mmap = rockchip_drm_gem_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};

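/*
 * rockchip_gem_alloc_object - allocate and initialize the GEM object itself,
 * without backing storage; the size is rounded up to a whole number of pages
 * before drm_gem_object_init().
 */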
static struct rockchip_gem_object *
	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	obj->funcs = &rockchip_gem_object_funcs;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

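/*
 * rockchip_gem_create_object - allocate a GEM object together with its
 * backing storage.
 *
 * Sketch of a typical driver-internal call (hypothetical caller, error
 * handling elided):
 *
 *	rk_obj = rockchip_gem_create_object(drm, size, false);
 *	if (IS_ERR(rk_obj))
 *		return PTR_ERR(rk_obj);
 */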
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a GEM handle for it
 *
 * Returns a struct rockchip_gem_object* on success or an ERR_PTR()
 * value on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	bool is_framebuffer;
	int ret;

	is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file;

	rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle carries the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes, since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

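/*
 * PRIME import helpers: with an IOMMU the imported sg table is mapped into
 * the domain like native pages; without one the table must resolve to a
 * single contiguous DMA range, since the buffer must then be physically
 * contiguous.
 */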
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

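/*
 * rockchip_gem_prime_import_sg_table - wrap an imported dma-buf's sg table
 * in a rockchip_gem_object and map it for use by the display hardware.
 */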
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

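/*
 * rockchip_gem_prime_vmap - (struct drm_gem_object_funcs)->vmap callback.
 * For page-backed objects, reuse the kernel mapping created at allocation
 * time if one exists, otherwise create a fresh write-combined vmap(); the
 * vunmap callback below only unmaps in the latter case.
 */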
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		void *vaddr;

		if (rk_obj->kvaddr)
			vaddr = rk_obj->kvaddr;
		else
			vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				     pgprot_writecombine(PAGE_KERNEL));

		if (!vaddr)
			return -ENOMEM;
		iosys_map_set_vaddr(map, vaddr);
		return 0;
	}

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return -ENOMEM;
	iosys_map_set_vaddr(map, rk_obj->kvaddr);

	return 0;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
			       struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		if (map->vaddr != rk_obj->kvaddr)
			vunmap(map->vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}