// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

MODULE_IMPORT_NS("DMA_BUF");

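/*
 * Fault handler for mmap()'d objects: translate the faulting address to
 * a page frame within the object's contiguous physical region and
 * insert it into the faulting VMA.
 */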
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

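/*
 * Tear down an object: release its backing store (system pages, a node
 * in the linear region, or an imported dma-buf attachment) and free the
 * object itself.
 */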
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);

		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
							  dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

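/*
 * Give an object physically contiguous backing. Small allocations
 * (cursors) come from the page allocator; everything else comes from
 * the driver's linear (framebuffer) memory region. Does nothing if the
 * object is already backed.
 */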
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from DMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * The GEM DMA helper interface uses dma_alloc_coherent(), which
	 * provides us with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the device address also unsafe to
	 * re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned int align = min_t(unsigned int, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

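/* Map an object for CPU access; only linear objects need an ioremap. */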
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};

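/*
 * Allocate a GEM object with no shmem backing store; the caller is
 * expected to attach linear or imported backing later.
 */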
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

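/* Allocate a shmem-backed GEM object, suitable for CPU-only access. */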
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

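/*
 * Copy user data into a kernel-mapped object, then invoke the object's
 * update hook (if any) so the new contents take effect.
 */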
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret = 0;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
			 args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	if (fault_in_readable(ptr, args->size))
		return -EFAULT;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Prime support */
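/*
 * Build an sg_table for an importer. There are three backing cases:
 * shmem objects are pinned page by page, page-backed objects map their
 * single contiguous allocation, and linear objects have no struct pages,
 * so their device address is filled in directly.
 */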
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

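/* Undo armada_gem_prime_map_dma_buf(): unmap, unpin and free the table. */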
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf = armada_gem_prime_map_dma_buf,
	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = armada_gem_dmabuf_mmap,
};

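/*
 * Export through our own dma_buf_ops so that re-imports of our buffers
 * can be recognised in armada_gem_prime_import().
 */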
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

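/*
 * Import a dma-buf. Re-importing one of our own exports simply takes
 * another reference on the underlying GEM object; foreign buffers get a
 * private object with an (as yet unmapped) attachment.
 */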
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

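/*
 * Map an imported dma-buf for DMA. The display hardware requires a
 * single contiguous region, so a scattered mapping, or one smaller than
 * the object, is rejected.
 */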
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
						    DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}