// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

struct gntdev_dmabuf {
        struct gntdev_dmabuf_priv *priv;
        struct dma_buf *dmabuf;
        struct list_head next;
        int fd;

        union {
                struct {
                        /* Exported buffers are reference counted. */
                        struct kref refcount;

                        struct gntdev_priv *priv;
                        struct gntdev_grant_map *map;
                } exp;
                struct {
                        /* Granted references of the imported buffer. */
                        grant_ref_t *refs;
                        /* Scatter-gather table of the imported buffer. */
                        struct sg_table *sgt;
                        /* dma-buf attachment of the imported buffer. */
                        struct dma_buf_attachment *attach;
                } imp;
        } u;

        /* Number of pages this buffer has. */
        int nr_pages;
        /* Pages of this buffer (only for dma-buf export). */
        struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
        struct list_head next;
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct completion completion;
};

struct gntdev_dmabuf_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
        /* List of exported DMA buffers. */
        struct list_head exp_list;
        /* List of wait objects. */
        struct list_head exp_wait_list;
        /* List of imported DMA buffers. */
        struct list_head imp_list;
        /* This is the lock which protects dma_buf_xxx lists. */
        struct mutex lock;
        /*
         * We reference this file while exporting dma-bufs, so
         * the grant device context is not destroyed while there are
         * external users alive.
         */
        struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
                        struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_completion(&obj->completion);
        obj->gntdev_dmabuf = gntdev_dmabuf;

        mutex_lock(&priv->lock);
        list_add(&obj->next, &priv->exp_wait_list);
        /* Put our reference and wait for gntdev_dmabuf's release to fire. */
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
        return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
                                     struct gntdev_dmabuf_wait_obj *obj)
{
        mutex_lock(&priv->lock);
        list_del(&obj->next);
        mutex_unlock(&priv->lock);
        kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
                                    u32 wait_to_ms)
{
        if (wait_for_completion_timeout(&obj->completion,
                        msecs_to_jiffies(wait_to_ms)) <= 0)
                return -ETIMEDOUT;

        return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
                                       struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        list_for_each_entry(obj, &priv->exp_wait_list, next)
                if (obj->gntdev_dmabuf == gntdev_dmabuf) {
                        pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
                        complete_all(&obj->completion);
                        break;
                }
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the wait list\n");
                        kref_get(&gntdev_dmabuf->u.exp.refcount);
                        ret = gntdev_dmabuf;
                        break;
                }
        mutex_unlock(&priv->lock);
        return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
                                    int wait_to_ms)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct gntdev_dmabuf_wait_obj *obj;
        int ret;

        pr_debug("Will wait for dma-buf with fd %d\n", fd);
        /*
         * Try to find the DMA buffer: if not found, then either the buffer
         * has already been released or the file descriptor provided is wrong.
         */
        gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        /*
         * gntdev_dmabuf still exists and is reference count locked by us now,
         * so prepare to wait: allocate a wait object and add it to the wait
         * list so we can find it on release.
         */
        obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
        dmabuf_exp_wait_obj_free(priv, obj);
        return ret;
}
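
/*
 * A minimal user-space sketch of the wait-for-release flow implemented
 * above; an illustration, not part of the driver. It assumes /dev/xen/gntdev
 * is already open as gntdev_fd, that dmabuf_fd came from the export ioctl,
 * and that IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED and its op layout come
 * from include/uapi/xen/gntdev.h (the field names match the handler
 * gntdev_ioctl_dmabuf_exp_wait_released() at the bottom of this file):
 *
 *	struct ioctl_gntdev_dmabuf_exp_wait_released op = {
 *		.fd = dmabuf_fd,	// fd returned by the export ioctl
 *		.wait_to_ms = 1000,	// give importers up to one second
 *	};
 *
 *	// Fails with ENOENT if the buffer is already released and with
 *	// ETIMEDOUT if an importer still holds it when the timeout expires.
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &op))
 *		perror("IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED");
 */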

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sgt;
        int ret;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
                                 struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

        gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf_attach)
                return -ENOMEM;

        gntdev_dmabuf_attach->dir = DMA_NONE;
        attach->priv = gntdev_dmabuf_attach;
        return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

        if (gntdev_dmabuf_attach) {
                struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

                if (sgt) {
                        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                                dma_unmap_sgtable(attach->dev, sgt,
                                                  gntdev_dmabuf_attach->dir,
                                                  DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }

                kfree(sgt);
                kfree(gntdev_dmabuf_attach);
                attach->priv = NULL;
        }
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
        struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
        struct sg_table *sgt;

        pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
                 attach->dev);

        if (dir == DMA_NONE || !gntdev_dmabuf_attach)
                return ERR_PTR(-EINVAL);

        /* Return the cached mapping when possible. */
        if (gntdev_dmabuf_attach->dir == dir)
                return gntdev_dmabuf_attach->sgt;

        /*
         * Two mappings with different directions for the same attachment are
         * not allowed.
         */
        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                return ERR_PTR(-EBUSY);

        sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                  gntdev_dmabuf->nr_pages);
        if (!IS_ERR(sgt)) {
                if (dma_map_sgtable(attach->dev, sgt, dir,
                                    DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        gntdev_dmabuf_attach->sgt = sgt;
                        gntdev_dmabuf_attach->dir = dir;
                }
        }
        if (IS_ERR(sgt))
                pr_debug("Failed to map sg table for dev %p\n", attach->dev);
        return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
{
        /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
        struct gntdev_dmabuf *gntdev_dmabuf =
                container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

        dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
        list_del(&gntdev_dmabuf->next);
        fput(gntdev_dmabuf->priv->filp);
        kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
                                  struct gntdev_grant_map *map)
{
        mutex_lock(&priv->lock);
        list_del(&map->next);
        gntdev_put_map(NULL /* already removed */, map);
        mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
        struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
        struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

        dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
                              gntdev_dmabuf->u.exp.map);
        mutex_lock(&priv->lock);
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
        .attach = dmabuf_exp_ops_attach,
        .detach = dmabuf_exp_ops_detach,
        .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
        .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
        .release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
        struct gntdev_priv *priv;
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_priv *dmabuf_priv;
        struct device *dev;
        int count;
        struct page **pages;
        u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct gntdev_dmabuf *gntdev_dmabuf;
        int ret;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                return -ENOMEM;

        kref_init(&gntdev_dmabuf->u.exp.refcount);

        gntdev_dmabuf->priv = args->dmabuf_priv;
        gntdev_dmabuf->nr_pages = args->count;
        gntdev_dmabuf->pages = args->pages;
        gntdev_dmabuf->u.exp.priv = args->priv;
        gntdev_dmabuf->u.exp.map = args->map;

        exp_info.exp_name = KBUILD_MODNAME;
        if (args->dev->driver && args->dev->driver->owner)
                exp_info.owner = args->dev->driver->owner;
        else
                exp_info.owner = THIS_MODULE;
        exp_info.ops = &dmabuf_exp_ops;
        exp_info.size = args->count << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = gntdev_dmabuf;

        gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(gntdev_dmabuf->dmabuf)) {
                ret = PTR_ERR(gntdev_dmabuf->dmabuf);
                gntdev_dmabuf->dmabuf = NULL;
                goto fail;
        }

        ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
        if (ret < 0)
                goto fail;

        gntdev_dmabuf->fd = ret;
        args->fd = ret;

        pr_debug("Exporting DMA buffer with fd %d\n", ret);

        mutex_lock(&args->dmabuf_priv->lock);
        list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
        mutex_unlock(&args->dmabuf_priv->lock);
        get_file(gntdev_dmabuf->priv->filp);
        return 0;

fail:
        if (gntdev_dmabuf->dmabuf)
                dma_buf_put(gntdev_dmabuf->dmabuf);
        kfree(gntdev_dmabuf);
        return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
                                 int count)
{
        struct gntdev_grant_map *map;

        if (unlikely(gntdev_test_page_count(count)))
                return ERR_PTR(-EINVAL);

        if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
            (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
                pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
                return ERR_PTR(-EINVAL);
        }

        map = gntdev_alloc_map(priv, count, dmabuf_flags);
        if (!map)
                return ERR_PTR(-ENOMEM);

        return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
                                int count, u32 domid, u32 *refs, u32 *fd)
{
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_export_args args;
        int i, ret;

        map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
        if (IS_ERR(map))
                return PTR_ERR(map);

        for (i = 0; i < count; i++) {
                map->grants[i].domid = domid;
                map->grants[i].ref = refs[i];
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        mutex_unlock(&priv->lock);

        map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
        map->flags |= GNTMAP_device_map;
#endif

        ret = gntdev_map_grant_pages(map);
        if (ret < 0)
                goto out;

        args.priv = priv;
        args.map = map;
        args.dev = priv->dma_dev;
        args.dmabuf_priv = priv->dmabuf_priv;
        args.count = map->count;
        args.pages = map->pages;
        args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

        ret = dmabuf_exp_from_pages(&args);
        if (ret < 0)
                goto out;

        *fd = args.fd;
        return 0;

out:
        dmabuf_exp_remove_map(priv, map);
        return ret;
}
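
/*
 * Illustrative user-space sequence for the export path above. This is a
 * sketch under the assumption that struct ioctl_gntdev_dmabuf_exp_from_refs
 * in include/uapi/xen/gntdev.h carries flags, count and domid plus a
 * trailing refs[] array, and returns the dma-buf fd in op->fd on success;
 * gntdev_fd, count, domid and refs[] are hypothetical caller-provided
 * values:
 *
 *	size_t sz = sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs) +
 *		    (count - 1) * sizeof(__u32);
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op = calloc(1, sz);
 *
 *	op->flags = GNTDEV_DMA_FLAG_WC;	// WC and COHERENT are exclusive
 *	op->count = count;		// number of grant references
 *	op->domid = domid;		// domain that granted the pages
 *	memcpy(op->refs, refs, count * sizeof(__u32));
 *
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		dmabuf_fd = op->fd;	// hand to any dma-buf importer
 */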

/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
                                int count, int domid)
{
        grant_ref_t priv_gref_head;
        int i, ret;

        ret = gnttab_alloc_grant_references(count, &priv_gref_head);
        if (ret < 0) {
                pr_debug("Cannot allocate grant references, ret %d\n", ret);
                return ret;
        }

        for (i = 0; i < count; i++) {
                int cur_ref;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0) {
                        ret = cur_ref;
                        pr_debug("Cannot claim grant reference, ret %d\n", ret);
                        goto out;
                }

                gnttab_grant_foreign_access_ref(cur_ref, domid,
                                                gfns[i], 0);
                refs[i] = cur_ref;
        }

        return 0;

out:
        gnttab_free_grant_references(priv_gref_head);
        return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (refs[i] != INVALID_GRANT_REF)
                        gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
        kfree(gntdev_dmabuf->u.imp.refs);
        kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        int i;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                goto fail_no_free;

        gntdev_dmabuf->u.imp.refs = kcalloc(count,
                                            sizeof(gntdev_dmabuf->u.imp.refs[0]),
                                            GFP_KERNEL);
        if (!gntdev_dmabuf->u.imp.refs)
                goto fail;

        gntdev_dmabuf->nr_pages = count;

        for (i = 0; i < count; i++)
                gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

        return gntdev_dmabuf;

fail:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
        return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
                   int fd, int count, int domid)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct sg_dma_page_iter sg_iter;
        unsigned long *gfns;
        int i;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return ERR_CAST(dma_buf);

        gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
        if (IS_ERR(gntdev_dmabuf)) {
                ret = gntdev_dmabuf;
                goto fail_put;
        }

        gntdev_dmabuf->priv = priv;
        gntdev_dmabuf->fd = fd;

        attach = dma_buf_attach(dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = ERR_CAST(attach);
                goto fail_free_obj;
        }

        gntdev_dmabuf->u.imp.attach = attach;

        sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = ERR_CAST(sgt);
                goto fail_detach;
        }

        /* Check that we have zero offset. */
        if (sgt->sgl->offset) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
                         sgt->sgl->offset);
                goto fail_unmap;
        }

        /* Check the number of pages that the imported buffer has. */
        if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
                         attach->dmabuf->size >> PAGE_SHIFT,
                         gntdev_dmabuf->nr_pages);
                goto fail_unmap;
        }

        gntdev_dmabuf->u.imp.sgt = sgt;

        gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
        if (!gfns) {
                ret = ERR_PTR(-ENOMEM);
                goto fail_unmap;
        }

        /*
         * Now convert sgt to array of gfns without accessing underlying pages.
         * It is not allowed to access the underlying struct page of an sg
         * table exported by DMA-buf, but since we deal with a special Xen dma
         * device here (not a normal physical one), we can look at the dma
         * addresses in the sg table and calculate gfns directly from them.
         */
        i = 0;
        for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
                dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
                unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

                gfns[i++] = pfn_to_gfn(pfn);
        }

        ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
                                                      gntdev_dmabuf->u.imp.refs,
                                                      count, domid));
        kfree(gfns);
        if (IS_ERR(ret))
                goto fail_end_access;

        pr_debug("Imported DMA buffer with fd %d\n", fd);

        mutex_lock(&priv->lock);
        list_add(&gntdev_dmabuf->next, &priv->imp_list);
        mutex_unlock(&priv->lock);

        return gntdev_dmabuf;

fail_end_access:
        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
        dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
fail_free_obj:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
        dma_buf_put(dma_buf);
        return ret;
}
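
/*
 * A matching user-space sketch for the import path above, again assuming
 * the op layout from include/uapi/xen/gntdev.h (fd, count and domid in,
 * one grant reference per page out in a trailing refs[] array); gntdev_fd,
 * dmabuf_fd, count and domid are hypothetical caller-provided values:
 *
 *	size_t sz = sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs) +
 *		    (count - 1) * sizeof(__u32);
 *	struct ioctl_gntdev_dmabuf_imp_to_refs *op = calloc(1, sz);
 *
 *	op->fd = dmabuf_fd;	// dma-buf fd received from the exporter
 *	op->count = count;	// must match the buffer size in pages
 *	op->domid = domid;	// domain allowed to map the new grants
 *
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, op) == 0)
 *		send_refs_to_peer(op->refs, count);	// hypothetical helper
 */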

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the import list\n");
                        ret = gntdev_dmabuf;
                        list_del(&gntdev_dmabuf->next);
                        break;
                }
        }
        mutex_unlock(&priv->lock);
        return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        pr_debug("Releasing DMA buffer with fd %d\n", fd);

        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
                                      gntdev_dmabuf->nr_pages);

        attach = gntdev_dmabuf->u.imp.attach;

        if (gntdev_dmabuf->u.imp.sgt)
                dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
                                                  DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        dma_buf_put(dma_buf);

        dmabuf_imp_free_storage(gntdev_dmabuf);
        return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf;

        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
                dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
                                       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs op;
        u32 *refs;
        long ret;

        if (use_ptemod) {
                pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
                         use_ptemod);
                return -EINVAL;
        }

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
        if (!refs)
                return -ENOMEM;

        if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out;
        }

        ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
                                   op.domid, refs, &op.fd);
        if (ret)
                goto out;

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                ret = -EFAULT;

out:
        kfree(refs);
        return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
                                           struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_wait_released op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
                                        op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_to_refs op;
        struct gntdev_dmabuf *gntdev_dmabuf;
        long ret;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
                                           priv->dma_dev, op.fd,
                                           op.count, op.domid);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
                         sizeof(*u->refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out_release;
        }
        return 0;

out_release:
        dmabuf_imp_release(priv->dmabuf_priv, op.fd);
        return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_release op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
        struct gntdev_dmabuf_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        mutex_init(&priv->lock);
        INIT_LIST_HEAD(&priv->exp_list);
        INIT_LIST_HEAD(&priv->exp_wait_list);
        INIT_LIST_HEAD(&priv->imp_list);

        priv->filp = filp;

        return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
        dmabuf_imp_release_all(priv);
        kfree(priv);
}