v5.9: drivers/xen/gntdev-dmabuf.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because of the fact it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

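/*
 * One tracking object per dma-buf. The same structure backs both
 * directions, so only one arm of the union is meaningful for a given
 * buffer: .u.exp for buffers this driver exported, .u.imp for buffers
 * imported from another exporter.
 */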
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

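/*
 * Called from dmabuf_exp_release() with priv->lock already held (both
 * kref_put() call sites take the lock first), which is why the wait list
 * can be walked here without additional locking.
 */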
static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

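/*
 * Backs IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED: take a temporary reference
 * on the exported buffer, register a wait object (which also drops that
 * reference), then block until dmabuf_exp_release() signals the
 * completion or the timeout expires.
 */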
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if not found means that
	 * either the buffer has already been released or file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
						   sgt->nents,
						   gntdev_dmabuf_attach->dir,
						   DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops =  {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

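/*
 * Wrap the grant-mapped pages in a new dma_buf and install an fd for it.
 * On success the returned fd holds the long-term reference to the
 * dma_buf; the gntdev file itself is pinned with get_file() so the grant
 * device context outlives any external importer.
 */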
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

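/*
 * The complete export path: allocate backing storage, fill in the grant
 * references provided by user-space, map them (GNTMAP_host_map, plus
 * GNTMAP_device_map on x86 so the pages are usable for device DMA), then
 * turn the mapped pages into a dma-buf fd via dmabuf_exp_from_pages().
 */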
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

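/*
 * Grant the foreign domain access to each page backing the imported
 * buffer. On a mid-loop failure the unclaimed references are returned to
 * the pool here, while the already granted ones are ended by the caller
 * via dmabuf_imp_end_foreign_access().
 */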
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

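/*
 * The complete import path: attach to and map the foreign dma-buf, check
 * that it starts at a zero offset and matches the page count requested by
 * user-space, collect the backing struct pages (buffers without struct
 * pages, e.g. VRAM, are rejected), and grant the foreign domain access to
 * them so the resulting references can be handed back to user-space.
 */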
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can happen if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemode %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}
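
/*
 * A minimal user-space sketch of driving the export ioctl above (an
 * illustration, not part of the driver; it assumes the variable-length
 * layout of struct ioctl_gntdev_dmabuf_exp_from_refs from
 * include/uapi/xen/gntdev.h, where refs[] is the trailing array, and the
 * gntdev_fd/count/domid/refs variables are hypothetical):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *	size_t sz = sizeof(*op) + (count - 1) * sizeof(op->refs[0]);
 *
 *	op = calloc(1, sz);
 *	op->flags = GNTDEV_DMA_FLAG_WC;	// write-combined backing storage
 *	op->count = count;		// number of grant references
 *	op->domid = domid;		// domain that granted the pages
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		use_dmabuf(op->fd);	// op->fd is the exported dma-buf
 */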

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}
v6.2: drivers/xen/gntdev-dmabuf.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

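/*
 * The dma-buf core symbols were moved into the DMA_BUF symbol namespace
 * between these two versions, so users now have to import it explicitly.
 */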
MODULE_IMPORT_NS(DMA_BUF);

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if not found means that
	 * either the buffer has already been released or file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

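/*
 * Compared with the v5.9 listing above, the attachment teardown and setup
 * below use the sg_table helpers dma_unmap_sgtable()/dma_map_sgtable(),
 * which keep the mapped-entry bookkeeping inside struct sg_table, instead
 * of the raw dma_unmap_sg_attrs()/dma_map_sg_attrs() calls.
 */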
static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

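/*
 * Note the changed convention: dma_map_sgtable() below returns 0 on
 * success and a negative errno on failure, whereas the dma_map_sg_attrs()
 * call in v5.9 returned the number of mapped entries (0 meaning failure).
 */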
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops =  {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

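/*
 * Two grant-table API changes between the versions are visible below:
 * the core now provides INVALID_GRANT_REF (replacing this driver's local
 * GRANT_INVALID_REF definition), and gnttab_end_foreign_access() takes
 * the reference plus an optional page to free instead of the old
 * (ref, readonly, page) triple.
 */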
static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

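/*
 * The import path is otherwise unchanged from v5.9, but it now maps and
 * unmaps the attachment with the _unlocked dma-buf variants (the plain
 * ones expect the caller to follow the newer dma-resv locking convention)
 * and walks the pages with for_each_sgtable_page() instead of
 * for_each_sg_page().
 */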
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can happen if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
						  DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemode %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}