// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

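/*
 * Reserve an IOVA range for the object in the driver-wide drm_mm and map
 * its backing scatterlist into the shared IOMMU domain. On success,
 * rk_obj->dma_addr holds the I/O virtual address and rk_obj->size the
 * number of bytes actually mapped.
 */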
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < (ssize_t)rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

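/* Tear down the IOMMU mapping and return the IOVA range to the drm_mm. */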
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

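/*
 * Back the object with shmem pages and build a scatter/gather table for
 * them, syncing the pages to the device so it sees coherent contents.
 */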
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sgtable_for_device() can be
	 * used to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

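/*
 * IOMMU-backed allocation: get shmem pages, map them through the IOMMU,
 * and optionally create a write-combined kernel mapping.
 */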
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

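/*
 * DMA-API-backed allocation for devices without an IOMMU: the buffer is
 * write-combined and, unless a kernel mapping was requested, has no
 * kernel virtual address.
 */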
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}

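/* Choose the allocation path based on whether an IOMMU domain is attached. */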
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

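/*
 * Map the individual shmem pages into userspace; the caller has already
 * reset vma->vm_pgoff to 0, so vm_map_pages() maps from the first page.
 */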
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

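/*
 * Common GEM mmap path for both backing types: normalize the VMA flags
 * and page protection, then hand off to the IOMMU or DMA variant.
 */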
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);

	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	return ret;
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

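/* GEM object callbacks shared by both IOMMU- and DMA-backed objects. */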
static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap	= rockchip_gem_prime_vunmap,
	.mmap = rockchip_drm_gem_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};

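/* Allocate and initialize the GEM object itself, without backing storage. */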
static struct rockchip_gem_object *
	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	obj->funcs = &rockchip_gem_object_funcs;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	bool is_framebuffer;
	int ret;

	is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file;

	rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

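/*
 * Import path with an IOMMU: the exporter's sg_table can be mapped into
 * the IOMMU domain as-is.
 */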
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

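/*
 * Import path without an IOMMU: the exporter's buffer must map to a
 * single contiguous range of DMA addresses, since there is no IOMMU to
 * linearize it for the hardware.
 */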
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

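/* PRIME import: wrap an exporter-provided sg_table in a new GEM object. */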
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

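/*
 * Provide a kernel virtual address for the buffer, reusing the mapping
 * created at allocation time when one exists.
 */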
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		void *vaddr;

		if (rk_obj->kvaddr)
			vaddr = rk_obj->kvaddr;
		else
			vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				     pgprot_writecombine(PAGE_KERNEL));

		if (!vaddr)
			return -ENOMEM;
		iosys_map_set_vaddr(map, vaddr);
		return 0;
	}

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return -ENOMEM;
	iosys_map_set_vaddr(map, rk_obj->kvaddr);

	return 0;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
			       struct iosys_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		if (map->vaddr != rk_obj->kvaddr)
			vunmap(map->vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}