v5.9
// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on the usage of grant reference 0 as an invalid grant reference:
 * grant reference 0 is valid, but it is never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the provided file descriptor
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and we now hold a reference to it, so
	 * prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}
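
/*
 * Example (editorial, not part of the driver): a minimal user-space sketch
 * of waiting for an exported buffer to be released. It assumes the
 * IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED ioctl and its argument struct from
 * the xen/gntdev.h UAPI, an open gntdev file descriptor gntdev_fd, and a
 * dma-buf fd previously returned by the export ioctl:
 *
 *	#include <sys/ioctl.h>
 *	#include <xen/gntdev.h>
 *
 *	struct ioctl_gntdev_dmabuf_exp_wait_released wait_op = {
 *		.fd = dmabuf_fd,	// fd returned by the export ioctl
 *		.wait_to_ms = 1000,	// give importers one second
 *	};
 *
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait_op))
 *		perror("dma-buf not released");	// ETIMEDOUT: still in use
 *
 * A timeout only means some importer still holds a reference; the wait
 * object allocated above is freed either way.
 */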

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
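
/*
 * Note (editorial): sg_alloc_table_from_pages() coalesces physically
 * contiguous pages into single entries, so the resulting table may have
 * fewer entries than nr_pages. Consumers must therefore walk it with the
 * sg page iterator instead of assuming one entry per page, e.g.
 * (use_page() is a hypothetical helper):
 *
 *	struct sg_page_iter sg_iter;
 *
 *	for_each_sg_page(sgt->sgl, &sg_iter, sgt->orig_nents, 0)
 *		use_page(sg_page_iter_page(&sg_iter));
 *
 * The import path in dmabuf_imp_to_refs() below uses exactly this pattern
 * to convert an sg table back into a page array.
 */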

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
						   sgt->nents,
						   gntdev_dmabuf_attach->dir,
						   DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}
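
/*
 * Example (editorial, with assumptions): the references produced above are
 * ordinary grant references, granted to domain "domid". That foreign domain
 * can map them with the user-space grant-table library, given the refs array
 * and the granting domain's ID (both communicated out of band; names below
 * are hypothetical):
 *
 *	#include <xengnttab.h>
 *
 *	xengnttab_handle *xgt = xengnttab_open(NULL, 0);
 *	void *addr = xengnttab_map_domain_grant_refs(xgt, count,
 *						     granting_domid, refs,
 *						     PROT_READ | PROT_WRITE);
 *	if (addr == NULL)
 *		perror("xengnttab_map_domain_grant_refs");
 *
 * The mapping must be torn down with xengnttab_unmap() before the granting
 * side ends foreign access in dmabuf_imp_end_foreign_access().
 */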

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check the number of pages that the imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if the page is valid: this can fail if we are given
		 * a page from VRAM or another resource which is not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the dma-buf by its file descriptor and remove
 * it from the import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}
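
/*
 * Example (editorial, not part of the driver): user-space sketch of the
 * export path above. It assumes the xen/gntdev.h UAPI layout of struct
 * ioctl_gntdev_dmabuf_exp_from_refs (fixed header followed by a refs[]
 * array sized by count), an open gntdev fd, and grant refs received from
 * the remote domain:
 *
 *	size_t sz = sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs) +
 *		    (count - 1) * sizeof(__u32);
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op = calloc(1, sz);
 *
 *	op->flags = GNTDEV_DMA_FLAG_WC;	// WC and COHERENT are exclusive, see
 *					// dmabuf_exp_alloc_backing_storage()
 *	op->count = count;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, refs, count * sizeof(__u32));
 *
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		dmabuf_fd = op->fd;	// ready to hand to an importing device
 */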

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}
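
/*
 * Example (editorial, not part of the driver): user-space sketch of the
 * import path above, assuming the xen/gntdev.h UAPI layout of struct
 * ioctl_gntdev_dmabuf_imp_to_refs (fixed header followed by a refs[] array
 * sized by count) and a dma-buf fd received from an exporting driver:
 *
 *	size_t sz = sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs) +
 *		    (count - 1) * sizeof(__u32);
 *	struct ioctl_gntdev_dmabuf_imp_to_refs *op = calloc(1, sz);
 *
 *	op->fd = dmabuf_fd;
 *	op->count = count;	// must match the buffer size in pages
 *	op->domid = remote_domid;
 *
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, op) == 0)
 *		send_refs_to_remote(op->refs, count);	// hypothetical transport
 *
 *	// Once the remote side is done with the grants:
 *	struct ioctl_gntdev_dmabuf_imp_release rel = { .fd = dmabuf_fd };
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_RELEASE, &rel);
 */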

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}