v6.8
   1/******************************************************************************
   2 * gntdev.c
   3 *
   4 * Device for accessing (in user-space) pages that have been granted by other
   5 * domains.
   6 *
   7 * Copyright (c) 2006-2007, D G Murray.
   8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
   9 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  19 */
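/*
 * A minimal user-space sketch of the flow this device supports (illustrative
 * only, not part of this file): the peer domain id and grant reference are
 * placeholders assumed to have been exchanged out of band, e.g. via xenstore,
 * and a 4 KiB page size is assumed.
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count = 1,
 *		.refs[0] = { .domid = peer_domid, .ref = gref },
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, op.index);
 */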
  20
  21#undef DEBUG
  22
  23#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  24
  25#include <linux/dma-mapping.h>
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/init.h>
  29#include <linux/miscdevice.h>
  30#include <linux/fs.h>
  31#include <linux/uaccess.h>
  32#include <linux/sched.h>
  33#include <linux/sched/mm.h>
  34#include <linux/spinlock.h>
  35#include <linux/slab.h>
  36#include <linux/highmem.h>
  37#include <linux/refcount.h>
  38#include <linux/workqueue.h>
  39
  40#include <xen/xen.h>
  41#include <xen/grant_table.h>
  42#include <xen/balloon.h>
  43#include <xen/gntdev.h>
  44#include <xen/events.h>
  45#include <xen/page.h>
  46#include <asm/xen/hypervisor.h>
  47#include <asm/xen/hypercall.h>
  48
  49#include "gntdev-common.h"
  50#ifdef CONFIG_XEN_GNTDEV_DMABUF
  51#include "gntdev-dmabuf.h"
  52#endif
  53
  54MODULE_LICENSE("GPL");
  55MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
  56	      "Gerd Hoffmann <kraxel@redhat.com>");
  57MODULE_DESCRIPTION("User-space granted page access driver");
  58
  59static unsigned int limit = 64*1024;
  60module_param(limit, uint, 0644);
  61MODULE_PARM_DESC(limit,
  62	"Maximum number of grants that may be mapped by one mapping request");
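/*
 * Because the parameter is published with mode 0644 it can also be adjusted
 * at run time through the module's parameters directory under
 * /sys/module/<module>/parameters/limit.
 */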
  63
  64/* True in PV mode, false otherwise */
  65static int use_ptemod;
  66
  67static void unmap_grant_pages(struct gntdev_grant_map *map,
  68			      int offset, int pages);
  69
  70static struct miscdevice gntdev_miscdev;
  71
  72/* ------------------------------------------------------------------ */
  73
  74bool gntdev_test_page_count(unsigned int count)
  75{
  76	return !count || count > limit;
  77}
  78
  79static void gntdev_print_maps(struct gntdev_priv *priv,
  80			      char *text, int text_index)
  81{
  82#ifdef DEBUG
  83	struct gntdev_grant_map *map;
  84
  85	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
  86	list_for_each_entry(map, &priv->maps, next)
  87		pr_debug("  index %2d, count %2d %s\n",
  88		       map->index, map->count,
  89		       map->index == text_index && text ? text : "");
  90#endif
  91}
  92
  93static void gntdev_free_map(struct gntdev_grant_map *map)
  94{
  95	if (map == NULL)
  96		return;
  97
  98#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  99	if (map->dma_vaddr) {
 100		struct gnttab_dma_alloc_args args;
 101
 102		args.dev = map->dma_dev;
 103		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
 104		args.nr_pages = map->count;
 105		args.pages = map->pages;
 106		args.frames = map->frames;
 107		args.vaddr = map->dma_vaddr;
 108		args.dev_bus_addr = map->dma_bus_addr;
 109
 110		gnttab_dma_free_pages(&args);
 111	} else
 112#endif
 113	if (map->pages)
 114		gnttab_free_pages(map->count, map->pages);
 115
 116#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 117	kvfree(map->frames);
 118#endif
 119	kvfree(map->pages);
 120	kvfree(map->grants);
 121	kvfree(map->map_ops);
 122	kvfree(map->unmap_ops);
 123	kvfree(map->kmap_ops);
 124	kvfree(map->kunmap_ops);
 125	kvfree(map->being_removed);
 126	kfree(map);
 127}
 128
 129struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 130					  int dma_flags)
 131{
 132	struct gntdev_grant_map *add;
 133	int i;
 134
 135	add = kzalloc(sizeof(*add), GFP_KERNEL);
 136	if (NULL == add)
 137		return NULL;
 138
 139	add->grants    = kvmalloc_array(count, sizeof(add->grants[0]),
 140					GFP_KERNEL);
 141	add->map_ops   = kvmalloc_array(count, sizeof(add->map_ops[0]),
 142					GFP_KERNEL);
 143	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
 144					GFP_KERNEL);
 145	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 146	add->being_removed =
 147		kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
 148	if (NULL == add->grants    ||
 149	    NULL == add->map_ops   ||
 150	    NULL == add->unmap_ops ||
 151	    NULL == add->pages     ||
 152	    NULL == add->being_removed)
 153		goto err;
 154	if (use_ptemod) {
 155		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
 156						 GFP_KERNEL);
 157		add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
 158						 GFP_KERNEL);
 159		if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
 160			goto err;
 161	}
 162
 163#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 164	add->dma_flags = dma_flags;
 165
 166	/*
 167	 * Check if this mapping is requested to be backed
 168	 * by a DMA buffer.
 169	 */
 170	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
 171		struct gnttab_dma_alloc_args args;
 172
 173		add->frames = kvcalloc(count, sizeof(add->frames[0]),
 174				       GFP_KERNEL);
 175		if (!add->frames)
 176			goto err;
 177
 178		/* Remember the device, so we can free DMA memory. */
 179		add->dma_dev = priv->dma_dev;
 180
 181		args.dev = priv->dma_dev;
 182		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
 183		args.nr_pages = count;
 184		args.pages = add->pages;
 185		args.frames = add->frames;
 186
 187		if (gnttab_dma_alloc_pages(&args))
 188			goto err;
 189
 190		add->dma_vaddr = args.vaddr;
 191		add->dma_bus_addr = args.dev_bus_addr;
 192	} else
 193#endif
 194	if (gnttab_alloc_pages(count, add->pages))
 195		goto err;
 196
 197	for (i = 0; i < count; i++) {
 198		add->grants[i].domid = DOMID_INVALID;
 199		add->grants[i].ref = INVALID_GRANT_REF;
 200		add->map_ops[i].handle = INVALID_GRANT_HANDLE;
 201		add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
 202		if (use_ptemod) {
 203			add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
 204			add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
 205		}
 206	}
 207
 208	add->index = 0;
 209	add->count = count;
 210	refcount_set(&add->users, 1);
 211
 212	return add;
 213
 214err:
 215	gntdev_free_map(add);
 216	return NULL;
 217}
 218
 219void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
 220{
 221	struct gntdev_grant_map *map;
 222
 223	list_for_each_entry(map, &priv->maps, next) {
 224		if (add->index + add->count < map->index) {
 225			list_add_tail(&add->next, &map->next);
 226			goto done;
 227		}
 228		add->index = map->index + map->count;
 229	}
 230	list_add_tail(&add->next, &priv->maps);
 231
 232done:
 233	gntdev_print_maps(priv, "[new]", add->index);
 234}
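/*
 * Placement example (illustrative): with existing maps at (index 0, count 2)
 * and (index 10, count 4), a new 3-page map is assigned index 2.  Note the
 * strict "<" above: an inner gap is only reused when it is at least one page
 * larger than the new map, otherwise the map is appended after the last
 * entry.
 */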
 235
 236static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 237						      int index, int count)
 238{
 239	struct gntdev_grant_map *map;
 240
 241	list_for_each_entry(map, &priv->maps, next) {
 242		if (map->index != index)
 243			continue;
 244		if (count && map->count != count)
 245			continue;
 246		return map;
 247	}
 248	return NULL;
 249}
 250
 251void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 252{
 253	if (!map)
 254		return;
 255
 256	if (!refcount_dec_and_test(&map->users))
 257		return;
 258
 259	if (map->pages && !use_ptemod) {
 260		/*
 261		 * Increment the reference count.  This ensures that the
 262		 * subsequent call to unmap_grant_pages() will not wind up
 263		 * re-entering itself.  It *can* wind up calling
 264		 * gntdev_put_map() recursively, but such calls will be with a
 265		 * reference count greater than 1, so they will return before
 266		 * this code is reached.  The recursion depth is thus limited to
 267		 * 1.  Do NOT use refcount_inc() here, as it will detect that
 268		 * the reference count is zero and WARN().
 269		 */
 270		refcount_set(&map->users, 1);
 271
 272		/*
 273		 * Unmap the grants.  This may or may not be asynchronous, so it
 274		 * is possible that the reference count is 1 on return, but it
 275		 * could also be greater than 1.
 276		 */
 277		unmap_grant_pages(map, 0, map->count);
 278
 279		/* Check if the memory now needs to be freed */
 280		if (!refcount_dec_and_test(&map->users))
 281			return;
 282
 283		/*
 284		 * All pages have been returned to the hypervisor, so free the
 285		 * map.
 286		 */
 287	}
 288
 289	if (use_ptemod && map->notifier_init)
 290		mmu_interval_notifier_remove(&map->notifier);
 291
 292	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 293		notify_remote_via_evtchn(map->notify.event);
 294		evtchn_put(map->notify.event);
 295	}
 296	gntdev_free_map(map);
 297}
 298
 299/* ------------------------------------------------------------------ */
 300
 301static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 302{
 303	struct gntdev_grant_map *map = data;
 304	unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
 305	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
 306		    (1 << _GNTMAP_guest_avail0);
 307	u64 pte_maddr;
 308
 309	BUG_ON(pgnr >= map->count);
 310	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 311
 312	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 313			  map->grants[pgnr].ref,
 314			  map->grants[pgnr].domid);
 315	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 316			    INVALID_GRANT_HANDLE);
 317	return 0;
 318}
 319
 320int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 321{
 322	size_t alloced = 0;
 323	int i, err = 0;
 324
 325	if (!use_ptemod) {
 326		/* Note: it could already be mapped */
 327		if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
 328			return 0;
 329		for (i = 0; i < map->count; i++) {
 330			unsigned long addr = (unsigned long)
 331				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 332			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
 333				map->grants[i].ref,
 334				map->grants[i].domid);
 335			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
 336				map->flags, INVALID_GRANT_HANDLE);
 337		}
 338	} else {
 339		/*
  340		 * Set up the map_ops corresponding to the pte entries pointing
  341		 * to the kernel linear addresses of the struct pages.
  342		 * These ptes are completely different from the user ptes dealt
  343		 * with by find_grant_ptes().
 344		 * Note that GNTMAP_device_map isn't needed here: The
 345		 * dev_bus_addr output field gets consumed only from ->map_ops,
 346		 * and by not requesting it when mapping we also avoid needing
 347		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
 348		 * reference to the page in the hypervisor).
 349		 */
 350		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
 351				     GNTMAP_host_map;
 352
 353		for (i = 0; i < map->count; i++) {
 354			unsigned long address = (unsigned long)
 355				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 356			BUG_ON(PageHighMem(map->pages[i]));
 357
 358			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
 359				map->grants[i].ref,
 360				map->grants[i].domid);
 361			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
 362				flags, INVALID_GRANT_HANDLE);
 363		}
 364	}
 365
 366	pr_debug("map %d+%d\n", map->index, map->count);
 367	err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
 368			map->count);
 369
 370	for (i = 0; i < map->count; i++) {
 371		if (map->map_ops[i].status == GNTST_okay) {
 372			map->unmap_ops[i].handle = map->map_ops[i].handle;
 373			alloced++;
 374		} else if (!err)
 375			err = -EINVAL;
 376
 377		if (map->flags & GNTMAP_device_map)
 378			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
 379
 380		if (use_ptemod) {
 381			if (map->kmap_ops[i].status == GNTST_okay) {
 382				alloced++;
 383				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
 384			} else if (!err)
 385				err = -EINVAL;
 386		}
 387	}
 388	atomic_add(alloced, &map->live_grants);
 389	return err;
 390}
 391
 392static void __unmap_grant_pages_done(int result,
 393		struct gntab_unmap_queue_data *data)
 394{
 395	unsigned int i;
 396	struct gntdev_grant_map *map = data->data;
 397	unsigned int offset = data->unmap_ops - map->unmap_ops;
 398	int successful_unmaps = 0;
 399	int live_grants;
 400
 401	for (i = 0; i < data->count; i++) {
 402		if (map->unmap_ops[offset + i].status == GNTST_okay &&
 403		    map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
 404			successful_unmaps++;
 405
 406		WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
 407			map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
 408		pr_debug("unmap handle=%d st=%d\n",
 409			map->unmap_ops[offset+i].handle,
 410			map->unmap_ops[offset+i].status);
 411		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 412		if (use_ptemod) {
 413			if (map->kunmap_ops[offset + i].status == GNTST_okay &&
 414			    map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
 415				successful_unmaps++;
 416
 417			WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
 418				map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
 419			pr_debug("kunmap handle=%u st=%d\n",
 420				 map->kunmap_ops[offset+i].handle,
 421				 map->kunmap_ops[offset+i].status);
 422			map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 423		}
 424	}
 425
 426	/*
 427	 * Decrease the live-grant counter.  This must happen after the loop to
 428	 * prevent premature reuse of the grants by gnttab_mmap().
 429	 */
 430	live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
 431	if (WARN_ON(live_grants < 0))
 432		pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
 433		       __func__, live_grants, successful_unmaps);
 434
 435	/* Release reference taken by __unmap_grant_pages */
 436	gntdev_put_map(NULL, map);
 437}
 438
 439static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 440			       int pages)
 441{
 442	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 443		int pgno = (map->notify.addr >> PAGE_SHIFT);
 444
 445		if (pgno >= offset && pgno < offset + pages) {
 446			/* No need for kmap, pages are in lowmem */
 447			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 448
 449			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 450			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 451		}
 452	}
 453
 454	map->unmap_data.unmap_ops = map->unmap_ops + offset;
 455	map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
 456	map->unmap_data.pages = map->pages + offset;
 457	map->unmap_data.count = pages;
 458	map->unmap_data.done = __unmap_grant_pages_done;
 459	map->unmap_data.data = map;
 460	refcount_inc(&map->users); /* to keep map alive during async call below */
 461
 462	gnttab_unmap_refs_async(&map->unmap_data);
 463}
 464
 465static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 466			      int pages)
 467{
 468	int range;
 469
 470	if (atomic_read(&map->live_grants) == 0)
 471		return; /* Nothing to do */
 472
 473	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 474
 475	/* It is possible the requested range will have a "hole" where we
 476	 * already unmapped some of the grants. Only unmap valid ranges.
 477	 */
 478	while (pages) {
 479		while (pages && map->being_removed[offset]) {
 480			offset++;
 481			pages--;
 482		}
 483		range = 0;
 484		while (range < pages) {
 485			if (map->being_removed[offset + range])
 486				break;
 487			map->being_removed[offset + range] = true;
 488			range++;
 489		}
 490		if (range)
 491			__unmap_grant_pages(map, offset, range);
 492		offset += range;
 493		pages -= range;
 494	}
 495}
 496
 497/* ------------------------------------------------------------------ */
 498
 499static void gntdev_vma_open(struct vm_area_struct *vma)
 500{
 501	struct gntdev_grant_map *map = vma->vm_private_data;
 502
 503	pr_debug("gntdev_vma_open %p\n", vma);
 504	refcount_inc(&map->users);
 505}
 506
 507static void gntdev_vma_close(struct vm_area_struct *vma)
 508{
 509	struct gntdev_grant_map *map = vma->vm_private_data;
 510	struct file *file = vma->vm_file;
 511	struct gntdev_priv *priv = file->private_data;
 512
 513	pr_debug("gntdev_vma_close %p\n", vma);
 514
 515	vma->vm_private_data = NULL;
 516	gntdev_put_map(priv, map);
 517}
 518
 519static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 520						 unsigned long addr)
 521{
 522	struct gntdev_grant_map *map = vma->vm_private_data;
 523
 524	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 525}
 526
 527static const struct vm_operations_struct gntdev_vmops = {
 528	.open = gntdev_vma_open,
 529	.close = gntdev_vma_close,
 530	.find_special_page = gntdev_vma_find_special_page,
 531};
 532
 533/* ------------------------------------------------------------------ */
 534
 535static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 536			      const struct mmu_notifier_range *range,
 537			      unsigned long cur_seq)
 538{
 539	struct gntdev_grant_map *map =
 540		container_of(mn, struct gntdev_grant_map, notifier);
 541	unsigned long mstart, mend;
 542	unsigned long map_start, map_end;
 543
 544	if (!mmu_notifier_range_blockable(range))
 545		return false;
 546
 547	map_start = map->pages_vm_start;
 548	map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
 549
 550	/*
 551	 * If the VMA is split or otherwise changed the notifier is not
 552	 * updated, but we don't want to process VA's outside the modified
 553	 * VMA. FIXME: It would be much more understandable to just prevent
 554	 * modifying the VMA in the first place.
 555	 */
 556	if (map_start >= range->end || map_end <= range->start)
 557		return true;
 558
 559	mstart = max(range->start, map_start);
 560	mend = min(range->end, map_end);
 561	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 562		 map->index, map->count, map_start, map_end,
 563		 range->start, range->end, mstart, mend);
 564	unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
 565			  (mend - mstart) >> PAGE_SHIFT);
 566
 567	return true;
 568}
 569
 570static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
 571	.invalidate = gntdev_invalidate,
 572};
 573
 574/* ------------------------------------------------------------------ */
 575
 576static int gntdev_open(struct inode *inode, struct file *flip)
 577{
 578	struct gntdev_priv *priv;
 579
 580	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 581	if (!priv)
 582		return -ENOMEM;
 583
 584	INIT_LIST_HEAD(&priv->maps);
 585	mutex_init(&priv->lock);
 586
 587#ifdef CONFIG_XEN_GNTDEV_DMABUF
 588	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
 589	if (IS_ERR(priv->dmabuf_priv)) {
 590		int ret = PTR_ERR(priv->dmabuf_priv);
 591
 592		kfree(priv);
 593		return ret;
 594	}
 595#endif
 596
 597	flip->private_data = priv;
 598#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 599	priv->dma_dev = gntdev_miscdev.this_device;
 600	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
 601#endif
 602	pr_debug("priv %p\n", priv);
 603
 604	return 0;
 605}
 606
 607static int gntdev_release(struct inode *inode, struct file *flip)
 608{
 609	struct gntdev_priv *priv = flip->private_data;
 610	struct gntdev_grant_map *map;
 611
 612	pr_debug("priv %p\n", priv);
 613
 614	mutex_lock(&priv->lock);
 615	while (!list_empty(&priv->maps)) {
 616		map = list_entry(priv->maps.next,
 617				 struct gntdev_grant_map, next);
 618		list_del(&map->next);
 619		gntdev_put_map(NULL /* already removed */, map);
 620	}
 621	mutex_unlock(&priv->lock);
 622
 623#ifdef CONFIG_XEN_GNTDEV_DMABUF
 624	gntdev_dmabuf_fini(priv->dmabuf_priv);
 625#endif
 626
 627	kfree(priv);
 628	return 0;
 629}
 630
 631static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 632				       struct ioctl_gntdev_map_grant_ref __user *u)
 633{
 634	struct ioctl_gntdev_map_grant_ref op;
 635	struct gntdev_grant_map *map;
 636	int err;
 637
 638	if (copy_from_user(&op, u, sizeof(op)) != 0)
 639		return -EFAULT;
 640	pr_debug("priv %p, add %d\n", priv, op.count);
 641	if (unlikely(gntdev_test_page_count(op.count)))
 642		return -EINVAL;
 643
 644	err = -ENOMEM;
 645	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
 646	if (!map)
 647		return err;
 648
 649	if (copy_from_user(map->grants, &u->refs,
 650			   sizeof(map->grants[0]) * op.count) != 0) {
 651		gntdev_put_map(NULL, map);
 652		return -EFAULT;
 653	}
 654
 655	mutex_lock(&priv->lock);
 656	gntdev_add_map(priv, map);
 657	op.index = map->index << PAGE_SHIFT;
 658	mutex_unlock(&priv->lock);
 659
 660	if (copy_to_user(u, &op, sizeof(op)) != 0)
 661		return -EFAULT;
 662
 663	return 0;
 664}
 665
 666static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 667					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 668{
 669	struct ioctl_gntdev_unmap_grant_ref op;
 670	struct gntdev_grant_map *map;
 671	int err = -ENOENT;
 672
 673	if (copy_from_user(&op, u, sizeof(op)) != 0)
 674		return -EFAULT;
 675	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 676
 677	mutex_lock(&priv->lock);
 678	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 679	if (map) {
 680		list_del(&map->next);
 681		err = 0;
 682	}
 683	mutex_unlock(&priv->lock);
 684	if (map)
 685		gntdev_put_map(priv, map);
 686	return err;
 687}
 688
 689static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 690					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 691{
 692	struct ioctl_gntdev_get_offset_for_vaddr op;
 693	struct vm_area_struct *vma;
 694	struct gntdev_grant_map *map;
 695	int rv = -EINVAL;
 696
 697	if (copy_from_user(&op, u, sizeof(op)) != 0)
 698		return -EFAULT;
 699	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 700
 701	mmap_read_lock(current->mm);
 702	vma = find_vma(current->mm, op.vaddr);
 703	if (!vma || vma->vm_ops != &gntdev_vmops)
 704		goto out_unlock;
 705
 706	map = vma->vm_private_data;
 707	if (!map)
 708		goto out_unlock;
 709
 710	op.offset = map->index << PAGE_SHIFT;
 711	op.count = map->count;
 712	rv = 0;
 713
 714 out_unlock:
 715	mmap_read_unlock(current->mm);
 716
 717	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 718		return -EFAULT;
 719	return rv;
 720}
 721
 722static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 723{
 724	struct ioctl_gntdev_unmap_notify op;
 725	struct gntdev_grant_map *map;
 726	int rc;
 727	int out_flags;
 728	evtchn_port_t out_event;
 729
 730	if (copy_from_user(&op, u, sizeof(op)))
 731		return -EFAULT;
 732
 733	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
 734		return -EINVAL;
 735
 736	/* We need to grab a reference to the event channel we are going to use
 737	 * to send the notify before releasing the reference we may already have
 738	 * (if someone has called this ioctl twice). This is required so that
 739	 * it is possible to change the clear_byte part of the notification
 740	 * without disturbing the event channel part, which may now be the last
 741	 * reference to that event channel.
 742	 */
 743	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
 744		if (evtchn_get(op.event_channel_port))
 745			return -EINVAL;
 746	}
 747
 748	out_flags = op.action;
 749	out_event = op.event_channel_port;
 750
 751	mutex_lock(&priv->lock);
 752
 753	list_for_each_entry(map, &priv->maps, next) {
 754		uint64_t begin = map->index << PAGE_SHIFT;
 755		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
 756		if (op.index >= begin && op.index < end)
 757			goto found;
 758	}
 759	rc = -ENOENT;
 760	goto unlock_out;
 761
 762 found:
 763	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
 764			(map->flags & GNTMAP_readonly)) {
 765		rc = -EINVAL;
 766		goto unlock_out;
 767	}
 768
 769	out_flags = map->notify.flags;
 770	out_event = map->notify.event;
 771
 772	map->notify.flags = op.action;
 773	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
 774	map->notify.event = op.event_channel_port;
 775
 776	rc = 0;
 777
 778 unlock_out:
 779	mutex_unlock(&priv->lock);
 780
 781	/* Drop the reference to the event channel we did not save in the map */
 782	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
 783		evtchn_put(out_event);
 784
 785	return rc;
 786}
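/*
 * User-space sketch of the ioctl above (illustrative): ask for the last byte
 * of the first mapped page to be cleared when the mapping is torn down,
 * where "offset" is the value returned by IOCTL_GNTDEV_MAP_GRANT_REF and a
 * 4 KiB page size is assumed:
 *
 *	struct ioctl_gntdev_unmap_notify notify = {
 *		.index = offset + 4095,
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */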
 787
 788#define GNTDEV_COPY_BATCH 16
 789
 790struct gntdev_copy_batch {
 791	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
 792	struct page *pages[GNTDEV_COPY_BATCH];
 793	s16 __user *status[GNTDEV_COPY_BATCH];
 794	unsigned int nr_ops;
 795	unsigned int nr_pages;
 796	bool writeable;
 797};
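/*
 * A batch issues at most GNTDEV_COPY_BATCH copy operations per
 * gnttab_batch_copy() invocation and pins at most one user page per
 * operation; gntdev_copy() flushes the accumulated ops and then drops the
 * page pins via gntdev_put_pages().
 */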
 798
 799static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 800				unsigned long *gfn)
 801{
 802	unsigned long addr = (unsigned long)virt;
 803	struct page *page;
 804	unsigned long xen_pfn;
 805	int ret;
 806
 807	ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
 808	if (ret < 0)
 809		return ret;
 810
 811	batch->pages[batch->nr_pages++] = page;
 812
 813	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
 814	*gfn = pfn_to_gfn(xen_pfn);
 815
 816	return 0;
 817}
 818
 819static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 820{
 821	unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
 822	batch->nr_pages = 0;
 823	batch->writeable = false;
 824}
 825
 826static int gntdev_copy(struct gntdev_copy_batch *batch)
 827{
 828	unsigned int i;
 829
 830	gnttab_batch_copy(batch->ops, batch->nr_ops);
 831	gntdev_put_pages(batch);
 832
 833	/*
 834	 * For each completed op, update the status if the op failed
 835	 * and all previous ops for the segment were successful.
 836	 */
 837	for (i = 0; i < batch->nr_ops; i++) {
 838		s16 status = batch->ops[i].status;
 839		s16 old_status;
 840
 841		if (status == GNTST_okay)
 842			continue;
 843
 844		if (__get_user(old_status, batch->status[i]))
 845			return -EFAULT;
 846
 847		if (old_status != GNTST_okay)
 848			continue;
 849
 850		if (__put_user(status, batch->status[i]))
 851			return -EFAULT;
 852	}
 853
 854	batch->nr_ops = 0;
 855	return 0;
 856}
 857
 858static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
 859				 struct gntdev_grant_copy_segment *seg,
 860				 s16 __user *status)
 861{
 862	uint16_t copied = 0;
 863
 864	/*
 865	 * Disallow local -> local copies since there is only space in
 866	 * batch->pages for one page per-op and this would be a very
 867	 * expensive memcpy().
 868	 */
 869	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
 870		return -EINVAL;
 871
 872	/* Can't cross page if source/dest is a grant ref. */
 873	if (seg->flags & GNTCOPY_source_gref) {
 874		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
 875			return -EINVAL;
 876	}
 877	if (seg->flags & GNTCOPY_dest_gref) {
 878		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
 879			return -EINVAL;
 880	}
 881
 882	if (put_user(GNTST_okay, status))
 883		return -EFAULT;
 884
 885	while (copied < seg->len) {
 886		struct gnttab_copy *op;
 887		void __user *virt;
 888		size_t len, off;
 889		unsigned long gfn;
 890		int ret;
 891
 892		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
 893			ret = gntdev_copy(batch);
 894			if (ret < 0)
 895				return ret;
 896		}
 897
 898		len = seg->len - copied;
 899
 900		op = &batch->ops[batch->nr_ops];
 901		op->flags = 0;
 902
 903		if (seg->flags & GNTCOPY_source_gref) {
 904			op->source.u.ref = seg->source.foreign.ref;
 905			op->source.domid = seg->source.foreign.domid;
 906			op->source.offset = seg->source.foreign.offset + copied;
 907			op->flags |= GNTCOPY_source_gref;
 908		} else {
 909			virt = seg->source.virt + copied;
 910			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 911			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 912			batch->writeable = false;
 913
 914			ret = gntdev_get_page(batch, virt, &gfn);
 915			if (ret < 0)
 916				return ret;
 917
 918			op->source.u.gmfn = gfn;
 919			op->source.domid = DOMID_SELF;
 920			op->source.offset = off;
 921		}
 922
 923		if (seg->flags & GNTCOPY_dest_gref) {
 924			op->dest.u.ref = seg->dest.foreign.ref;
 925			op->dest.domid = seg->dest.foreign.domid;
 926			op->dest.offset = seg->dest.foreign.offset + copied;
 927			op->flags |= GNTCOPY_dest_gref;
 928		} else {
 929			virt = seg->dest.virt + copied;
 930			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 931			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 932			batch->writeable = true;
 933
 934			ret = gntdev_get_page(batch, virt, &gfn);
 935			if (ret < 0)
 936				return ret;
 937
 938			op->dest.u.gmfn = gfn;
 939			op->dest.domid = DOMID_SELF;
 940			op->dest.offset = off;
 941		}
 942
 943		op->len = len;
 944		copied += len;
 945
 946		batch->status[batch->nr_ops] = status;
 947		batch->nr_ops++;
 948	}
 949
 950	return 0;
 951}
 952
 953static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 954{
 955	struct ioctl_gntdev_grant_copy copy;
 956	struct gntdev_copy_batch batch;
 957	unsigned int i;
 958	int ret = 0;
 959
 960	if (copy_from_user(&copy, u, sizeof(copy)))
 961		return -EFAULT;
 962
 963	batch.nr_ops = 0;
 964	batch.nr_pages = 0;
 965
 966	for (i = 0; i < copy.count; i++) {
 967		struct gntdev_grant_copy_segment seg;
 968
 969		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
 970			ret = -EFAULT;
 971			goto out;
 972		}
 973
 974		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
 975		if (ret < 0)
 976			goto out;
 977
 978		cond_resched();
 979	}
 980	if (batch.nr_ops)
 981		ret = gntdev_copy(&batch);
 982	return ret;
 983
 984  out:
 985	gntdev_put_pages(&batch);
 986	return ret;
 987}
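/*
 * User-space sketch of a grant copy (illustrative; gref and peer_domid are
 * placeholders, and GNTCOPY_dest_gref comes from the Xen grant table
 * interface headers): copy four bytes from a local buffer into a page
 * granted by the peer domain.
 *
 *	struct gntdev_grant_copy_segment seg = {
 *		.source.virt = buf,
 *		.dest.foreign = { .ref = gref, .domid = peer_domid },
 *		.len = 4,
 *		.flags = GNTCOPY_dest_gref,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = { .count = 1, .segments = &seg };
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 *	(on success seg.status is GNTST_okay)
 */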
 988
 989static long gntdev_ioctl(struct file *flip,
 990			 unsigned int cmd, unsigned long arg)
 991{
 992	struct gntdev_priv *priv = flip->private_data;
 993	void __user *ptr = (void __user *)arg;
 994
 995	switch (cmd) {
 996	case IOCTL_GNTDEV_MAP_GRANT_REF:
 997		return gntdev_ioctl_map_grant_ref(priv, ptr);
 998
 999	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
1000		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
1001
1002	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
1003		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
1004
1005	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
1006		return gntdev_ioctl_notify(priv, ptr);
1007
1008	case IOCTL_GNTDEV_GRANT_COPY:
1009		return gntdev_ioctl_grant_copy(priv, ptr);
1010
1011#ifdef CONFIG_XEN_GNTDEV_DMABUF
1012	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
1013		return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
1014
1015	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
1016		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
1017
1018	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
1019		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
1020
1021	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
1022		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
1023#endif
1024
1025	default:
1026		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
1027		return -ENOIOCTLCMD;
1028	}
1029
1030	return 0;
1031}
1032
1033static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1034{
1035	struct gntdev_priv *priv = flip->private_data;
1036	int index = vma->vm_pgoff;
1037	int count = vma_pages(vma);
1038	struct gntdev_grant_map *map;
1039	int err = -EINVAL;
1040
1041	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
1042		return -EINVAL;
1043
1044	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
1045		 index, count, vma->vm_start, vma->vm_pgoff);
1046
1047	mutex_lock(&priv->lock);
1048	map = gntdev_find_map_index(priv, index, count);
1049	if (!map)
1050		goto unlock_out;
1051	if (!atomic_add_unless(&map->in_use, 1, 1))
1052		goto unlock_out;
1053
1054	refcount_inc(&map->users);
1055
1056	vma->vm_ops = &gntdev_vmops;
1057
1058	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
1059
1060	if (use_ptemod)
1061		vm_flags_set(vma, VM_DONTCOPY);
1062
1063	vma->vm_private_data = map;
1064	if (map->flags) {
1065		if ((vma->vm_flags & VM_WRITE) &&
1066				(map->flags & GNTMAP_readonly))
1067			goto out_unlock_put;
1068	} else {
1069		map->flags = GNTMAP_host_map;
1070		if (!(vma->vm_flags & VM_WRITE))
1071			map->flags |= GNTMAP_readonly;
1072	}
1073
1074	map->pages_vm_start = vma->vm_start;
1075
1076	if (use_ptemod) {
1077		err = mmu_interval_notifier_insert_locked(
1078			&map->notifier, vma->vm_mm, vma->vm_start,
1079			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
1080		if (err)
1081			goto out_unlock_put;
1082
1083		map->notifier_init = true;
1084	}
1085	mutex_unlock(&priv->lock);
1086
1087	if (use_ptemod) {
1088		/*
1089		 * gntdev takes the address of the PTE in find_grant_ptes() and
1090		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
1091		 * purpose of the notifier is to prevent the hypervisor pointer
1092		 * to the PTE from going stale.
1093		 *
1094		 * Since this vma's mappings can't be touched without the
1095		 * mmap_lock, and we are holding it now, there is no need for
1096		 * the notifier_range locking pattern.
1097		 */
1098		mmu_interval_read_begin(&map->notifier);
1099
1100		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1101					  vma->vm_end - vma->vm_start,
1102					  find_grant_ptes, map);
1103		if (err) {
1104			pr_warn("find_grant_ptes() failure.\n");
1105			goto out_put_map;
1106		}
1107	}
1108
1109	err = gntdev_map_grant_pages(map);
1110	if (err)
1111		goto out_put_map;
1112
1113	if (!use_ptemod) {
1114		err = vm_map_pages_zero(vma, map->pages, map->count);
1115		if (err)
1116			goto out_put_map;
1117	}
1118
1119	return 0;
1120
1121unlock_out:
1122	mutex_unlock(&priv->lock);
1123	return err;
1124
1125out_unlock_put:
1126	mutex_unlock(&priv->lock);
1127out_put_map:
1128	if (use_ptemod)
1129		unmap_grant_pages(map, 0, map->count);
1130	gntdev_put_map(priv, map);
1131	return err;
1132}
1133
1134static const struct file_operations gntdev_fops = {
1135	.owner = THIS_MODULE,
1136	.open = gntdev_open,
1137	.release = gntdev_release,
1138	.mmap = gntdev_mmap,
1139	.unlocked_ioctl = gntdev_ioctl
1140};
1141
1142static struct miscdevice gntdev_miscdev = {
1143	.minor        = MISC_DYNAMIC_MINOR,
1144	.name         = "xen/gntdev",
1145	.fops         = &gntdev_fops,
1146};
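/*
 * The "xen/gntdev" misc name is what yields the /dev/xen/gntdev node used by
 * the user-space sketches above (created by devtmpfs/udev).
 */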
1147
1148/* ------------------------------------------------------------------ */
1149
1150static int __init gntdev_init(void)
1151{
1152	int err;
1153
1154	if (!xen_domain())
1155		return -ENODEV;
1156
1157	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
1158
1159	err = misc_register(&gntdev_miscdev);
1160	if (err != 0) {
1161		pr_err("Could not register gntdev device\n");
1162		return err;
1163	}
1164	return 0;
1165}
1166
1167static void __exit gntdev_exit(void)
1168{
1169	misc_deregister(&gntdev_miscdev);
1170}
1171
1172module_init(gntdev_init);
1173module_exit(gntdev_exit);
1174
1175/* ------------------------------------------------------------------ */
v5.14.15
   1/******************************************************************************
   2 * gntdev.c
   3 *
   4 * Device for accessing (in user-space) pages that have been granted by other
   5 * domains.
   6 *
   7 * Copyright (c) 2006-2007, D G Murray.
   8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
   9 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  19 */
  20
  21#undef DEBUG
  22
  23#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  24
  25#include <linux/dma-mapping.h>
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/init.h>
  29#include <linux/miscdevice.h>
  30#include <linux/fs.h>
  31#include <linux/uaccess.h>
  32#include <linux/sched.h>
  33#include <linux/sched/mm.h>
  34#include <linux/spinlock.h>
  35#include <linux/slab.h>
  36#include <linux/highmem.h>
  37#include <linux/refcount.h>
  38
  39#include <xen/xen.h>
  40#include <xen/grant_table.h>
  41#include <xen/balloon.h>
  42#include <xen/gntdev.h>
  43#include <xen/events.h>
  44#include <xen/page.h>
  45#include <asm/xen/hypervisor.h>
  46#include <asm/xen/hypercall.h>
  47
  48#include "gntdev-common.h"
  49#ifdef CONFIG_XEN_GNTDEV_DMABUF
  50#include "gntdev-dmabuf.h"
  51#endif
  52
  53MODULE_LICENSE("GPL");
  54MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
  55	      "Gerd Hoffmann <kraxel@redhat.com>");
  56MODULE_DESCRIPTION("User-space granted page access driver");
  57
  58static unsigned int limit = 64*1024;
  59module_param(limit, uint, 0644);
  60MODULE_PARM_DESC(limit,
  61	"Maximum number of grants that may be mapped by one mapping request");
  62
  63static int use_ptemod;
  64
  65static int unmap_grant_pages(struct gntdev_grant_map *map,
  66			     int offset, int pages);
  67
  68static struct miscdevice gntdev_miscdev;
  69
  70/* ------------------------------------------------------------------ */
  71
  72bool gntdev_test_page_count(unsigned int count)
  73{
  74	return !count || count > limit;
  75}
  76
  77static void gntdev_print_maps(struct gntdev_priv *priv,
  78			      char *text, int text_index)
  79{
  80#ifdef DEBUG
  81	struct gntdev_grant_map *map;
  82
  83	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
  84	list_for_each_entry(map, &priv->maps, next)
  85		pr_debug("  index %2d, count %2d %s\n",
  86		       map->index, map->count,
  87		       map->index == text_index && text ? text : "");
  88#endif
  89}
  90
  91static void gntdev_free_map(struct gntdev_grant_map *map)
  92{
  93	if (map == NULL)
  94		return;
  95
  96#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  97	if (map->dma_vaddr) {
  98		struct gnttab_dma_alloc_args args;
  99
 100		args.dev = map->dma_dev;
 101		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
 102		args.nr_pages = map->count;
 103		args.pages = map->pages;
 104		args.frames = map->frames;
 105		args.vaddr = map->dma_vaddr;
 106		args.dev_bus_addr = map->dma_bus_addr;
 107
 108		gnttab_dma_free_pages(&args);
 109	} else
 110#endif
 111	if (map->pages)
 112		gnttab_free_pages(map->count, map->pages);
 113
 114#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 115	kvfree(map->frames);
 116#endif
 117	kvfree(map->pages);
 118	kvfree(map->grants);
 119	kvfree(map->map_ops);
 120	kvfree(map->unmap_ops);
 121	kvfree(map->kmap_ops);
 122	kvfree(map->kunmap_ops);
 123	kfree(map);
 124}
 125
 126struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 127					  int dma_flags)
 128{
 129	struct gntdev_grant_map *add;
 130	int i;
 131
 132	add = kzalloc(sizeof(*add), GFP_KERNEL);
 133	if (NULL == add)
 134		return NULL;
 135
 136	add->grants    = kvmalloc_array(count, sizeof(add->grants[0]),
 137					GFP_KERNEL);
 138	add->map_ops   = kvmalloc_array(count, sizeof(add->map_ops[0]),
 139					GFP_KERNEL);
 140	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
 141					GFP_KERNEL);
 142	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 143	if (NULL == add->grants    ||
 144	    NULL == add->map_ops   ||
 145	    NULL == add->unmap_ops ||
 146	    NULL == add->pages)
 147		goto err;
 148	if (use_ptemod) {
 149		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
 150						 GFP_KERNEL);
 151		add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
 152						 GFP_KERNEL);
 153		if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
 154			goto err;
 155	}
 156
 157#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 158	add->dma_flags = dma_flags;
 159
 160	/*
 161	 * Check if this mapping is requested to be backed
 162	 * by a DMA buffer.
 163	 */
 164	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
 165		struct gnttab_dma_alloc_args args;
 166
 167		add->frames = kvcalloc(count, sizeof(add->frames[0]),
 168				       GFP_KERNEL);
 169		if (!add->frames)
 170			goto err;
 171
 172		/* Remember the device, so we can free DMA memory. */
 173		add->dma_dev = priv->dma_dev;
 174
 175		args.dev = priv->dma_dev;
 176		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
 177		args.nr_pages = count;
 178		args.pages = add->pages;
 179		args.frames = add->frames;
 180
 181		if (gnttab_dma_alloc_pages(&args))
 182			goto err;
 183
 184		add->dma_vaddr = args.vaddr;
 185		add->dma_bus_addr = args.dev_bus_addr;
 186	} else
 187#endif
 188	if (gnttab_alloc_pages(count, add->pages))
 189		goto err;
 190
 191	for (i = 0; i < count; i++) {
 192		add->grants[i].domid = DOMID_INVALID;
 193		add->grants[i].ref = INVALID_GRANT_REF;
 194		add->map_ops[i].handle = INVALID_GRANT_HANDLE;
 195		add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
 196		if (use_ptemod) {
 197			add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
 198			add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
 199		}
 200	}
 201
 202	add->index = 0;
 203	add->count = count;
 204	refcount_set(&add->users, 1);
 205
 206	return add;
 207
 208err:
 209	gntdev_free_map(add);
 210	return NULL;
 211}
 212
 213void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
 214{
 215	struct gntdev_grant_map *map;
 216
 217	list_for_each_entry(map, &priv->maps, next) {
 218		if (add->index + add->count < map->index) {
 219			list_add_tail(&add->next, &map->next);
 220			goto done;
 221		}
 222		add->index = map->index + map->count;
 223	}
 224	list_add_tail(&add->next, &priv->maps);
 225
 226done:
 227	gntdev_print_maps(priv, "[new]", add->index);
 228}
 229
 230static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 231						      int index, int count)
 232{
 233	struct gntdev_grant_map *map;
 234
 235	list_for_each_entry(map, &priv->maps, next) {
 236		if (map->index != index)
 237			continue;
 238		if (count && map->count != count)
 239			continue;
 240		return map;
 241	}
 242	return NULL;
 243}
 244
 245void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 246{
 247	if (!map)
 248		return;
 249
 250	if (!refcount_dec_and_test(&map->users))
 251		return;
 252
 253	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 254		notify_remote_via_evtchn(map->notify.event);
 255		evtchn_put(map->notify.event);
 256	}
 257
 258	if (map->pages && !use_ptemod)
 259		unmap_grant_pages(map, 0, map->count);
 260	gntdev_free_map(map);
 261}
 262
 263/* ------------------------------------------------------------------ */
 264
 265static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 266{
 267	struct gntdev_grant_map *map = data;
 268	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
 269	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
 270	u64 pte_maddr;
 271
 272	BUG_ON(pgnr >= map->count);
 273	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 274
 275	/*
  276	 * Set the PTE as special to force get_user_pages_fast() to fall
  277	 * back to the slow path.  If this is not supported as part of
 278	 * the grant map, it will be done afterwards.
 279	 */
 280	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
 281		flags |= (1 << _GNTMAP_guest_avail0);
 282
 283	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 284			  map->grants[pgnr].ref,
 285			  map->grants[pgnr].domid);
 286	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 287			    INVALID_GRANT_HANDLE);
 288	return 0;
 289}
 290
 291#ifdef CONFIG_X86
 292static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
 293{
 294	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
 295	return 0;
 296}
 297#endif
 298
 299int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 300{
 301	int i, err = 0;
 302
 303	if (!use_ptemod) {
 304		/* Note: it could already be mapped */
 305		if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
 306			return 0;
 307		for (i = 0; i < map->count; i++) {
 308			unsigned long addr = (unsigned long)
 309				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 310			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
 311				map->grants[i].ref,
 312				map->grants[i].domid);
 313			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
 314				map->flags, INVALID_GRANT_HANDLE);
 315		}
 316	} else {
 317		/*
  318		 * Set up the map_ops corresponding to the pte entries pointing
  319		 * to the kernel linear addresses of the struct pages.
  320		 * These ptes are completely different from the user ptes dealt
  321		 * with by find_grant_ptes().
 322		 * Note that GNTMAP_device_map isn't needed here: The
 323		 * dev_bus_addr output field gets consumed only from ->map_ops,
 324		 * and by not requesting it when mapping we also avoid needing
 325		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
 326		 * reference to the page in the hypervisor).
 327		 */
 328		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
 329				     GNTMAP_host_map;
 330
 331		for (i = 0; i < map->count; i++) {
 332			unsigned long address = (unsigned long)
 333				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 334			BUG_ON(PageHighMem(map->pages[i]));
 335
 336			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
 337				map->grants[i].ref,
 338				map->grants[i].domid);
 339			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
 340				flags, INVALID_GRANT_HANDLE);
 341		}
 342	}
 343
 344	pr_debug("map %d+%d\n", map->index, map->count);
 345	err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
 346			map->count);
 347
 348	for (i = 0; i < map->count; i++) {
 349		if (map->map_ops[i].status == GNTST_okay)
 350			map->unmap_ops[i].handle = map->map_ops[i].handle;
 351		else if (!err)
 352			err = -EINVAL;
 353
 354		if (map->flags & GNTMAP_device_map)
 355			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
 356
 357		if (use_ptemod) {
 358			if (map->kmap_ops[i].status == GNTST_okay)
 359				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
 360			else if (!err)
 361				err = -EINVAL;
 362		}
 363	}
 364	return err;
 365}
 366
 367static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 368			       int pages)
 369{
 370	int i, err = 0;
 371	struct gntab_unmap_queue_data unmap_data;
 372
 373	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 374		int pgno = (map->notify.addr >> PAGE_SHIFT);
 375		if (pgno >= offset && pgno < offset + pages) {
 376			/* No need for kmap, pages are in lowmem */
 377			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 378			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 379			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 380		}
 381	}
 382
 383	unmap_data.unmap_ops = map->unmap_ops + offset;
 384	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
 385	unmap_data.pages = map->pages + offset;
 386	unmap_data.count = pages;
 387
 388	err = gnttab_unmap_refs_sync(&unmap_data);
 389	if (err)
 390		return err;
 391
 392	for (i = 0; i < pages; i++) {
 393		if (map->unmap_ops[offset+i].status)
 394			err = -EINVAL;
 395		pr_debug("unmap handle=%d st=%d\n",
 396			map->unmap_ops[offset+i].handle,
 397			map->unmap_ops[offset+i].status);
 398		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 399		if (use_ptemod) {
 400			if (map->kunmap_ops[offset+i].status)
 401				err = -EINVAL;
 402			pr_debug("kunmap handle=%u st=%d\n",
 403				 map->kunmap_ops[offset+i].handle,
 404				 map->kunmap_ops[offset+i].status);
 405			map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 406		}
 407	}
 408	return err;
 409}
 410
 411static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 412			     int pages)
 413{
 414	int range, err = 0;
 415
 416	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 417
 418	/* It is possible the requested range will have a "hole" where we
 419	 * already unmapped some of the grants. Only unmap valid ranges.
 420	 */
 421	while (pages && !err) {
 422		while (pages &&
 423		       map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
 424			offset++;
 425			pages--;
 426		}
 427		range = 0;
 428		while (range < pages) {
 429			if (map->unmap_ops[offset + range].handle ==
 430			    INVALID_GRANT_HANDLE)
 431				break;
 432			range++;
 433		}
 434		err = __unmap_grant_pages(map, offset, range);
 435		offset += range;
 436		pages -= range;
 437	}
 438
 439	return err;
 440}
 441
 442/* ------------------------------------------------------------------ */
 443
 444static void gntdev_vma_open(struct vm_area_struct *vma)
 445{
 446	struct gntdev_grant_map *map = vma->vm_private_data;
 447
 448	pr_debug("gntdev_vma_open %p\n", vma);
 449	refcount_inc(&map->users);
 450}
 451
 452static void gntdev_vma_close(struct vm_area_struct *vma)
 453{
 454	struct gntdev_grant_map *map = vma->vm_private_data;
 455	struct file *file = vma->vm_file;
 456	struct gntdev_priv *priv = file->private_data;
 457
 458	pr_debug("gntdev_vma_close %p\n", vma);
 459	if (use_ptemod) {
 460		WARN_ON(map->vma != vma);
 461		mmu_interval_notifier_remove(&map->notifier);
 462		map->vma = NULL;
 463	}
 464	vma->vm_private_data = NULL;
 465	gntdev_put_map(priv, map);
 466}
 467
 468static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 469						 unsigned long addr)
 470{
 471	struct gntdev_grant_map *map = vma->vm_private_data;
 472
 473	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 474}
 475
 476static const struct vm_operations_struct gntdev_vmops = {
 477	.open = gntdev_vma_open,
 478	.close = gntdev_vma_close,
 479	.find_special_page = gntdev_vma_find_special_page,
 480};
 481
 482/* ------------------------------------------------------------------ */
 483
 484static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 485			      const struct mmu_notifier_range *range,
 486			      unsigned long cur_seq)
 487{
 488	struct gntdev_grant_map *map =
 489		container_of(mn, struct gntdev_grant_map, notifier);
 490	unsigned long mstart, mend;
 491	int err;
 492
 493	if (!mmu_notifier_range_blockable(range))
 494		return false;
 495
 496	/*
 497	 * If the VMA is split or otherwise changed the notifier is not
 498	 * updated, but we don't want to process VA's outside the modified
 499	 * VMA. FIXME: It would be much more understandable to just prevent
 500	 * modifying the VMA in the first place.
 501	 */
 502	if (map->vma->vm_start >= range->end ||
 503	    map->vma->vm_end <= range->start)
 504		return true;
 505
 506	mstart = max(range->start, map->vma->vm_start);
 507	mend = min(range->end, map->vma->vm_end);
 508	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 509			map->index, map->count,
 510			map->vma->vm_start, map->vma->vm_end,
 511			range->start, range->end, mstart, mend);
 512	err = unmap_grant_pages(map,
 513				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 514				(mend - mstart) >> PAGE_SHIFT);
 515	WARN_ON(err);
 516
 517	return true;
 518}
 519
 520static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
 521	.invalidate = gntdev_invalidate,
 522};
 523
 524/* ------------------------------------------------------------------ */
 525
 526static int gntdev_open(struct inode *inode, struct file *flip)
 527{
 528	struct gntdev_priv *priv;
 529
 530	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 531	if (!priv)
 532		return -ENOMEM;
 533
 534	INIT_LIST_HEAD(&priv->maps);
 535	mutex_init(&priv->lock);
 536
 537#ifdef CONFIG_XEN_GNTDEV_DMABUF
 538	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
 539	if (IS_ERR(priv->dmabuf_priv)) {
 540		int ret = PTR_ERR(priv->dmabuf_priv);
 541
 542		kfree(priv);
 543		return ret;
 544	}
 545#endif
 546
 547	flip->private_data = priv;
 548#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 549	priv->dma_dev = gntdev_miscdev.this_device;
 550	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
 551#endif
 552	pr_debug("priv %p\n", priv);
 553
 554	return 0;
 555}
 556
 557static int gntdev_release(struct inode *inode, struct file *flip)
 558{
 559	struct gntdev_priv *priv = flip->private_data;
 560	struct gntdev_grant_map *map;
 561
 562	pr_debug("priv %p\n", priv);
 563
 564	mutex_lock(&priv->lock);
 565	while (!list_empty(&priv->maps)) {
 566		map = list_entry(priv->maps.next,
 567				 struct gntdev_grant_map, next);
 568		list_del(&map->next);
 569		gntdev_put_map(NULL /* already removed */, map);
 570	}
 571	mutex_unlock(&priv->lock);
 572
 573#ifdef CONFIG_XEN_GNTDEV_DMABUF
 574	gntdev_dmabuf_fini(priv->dmabuf_priv);
 575#endif
 576
 577	kfree(priv);
 578	return 0;
 579}
 580
 581static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 582				       struct ioctl_gntdev_map_grant_ref __user *u)
 583{
 584	struct ioctl_gntdev_map_grant_ref op;
 585	struct gntdev_grant_map *map;
 586	int err;
 587
 588	if (copy_from_user(&op, u, sizeof(op)) != 0)
 589		return -EFAULT;
 590	pr_debug("priv %p, add %d\n", priv, op.count);
 591	if (unlikely(gntdev_test_page_count(op.count)))
 592		return -EINVAL;
 593
 594	err = -ENOMEM;
 595	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
 596	if (!map)
 597		return err;
 598
 599	if (copy_from_user(map->grants, &u->refs,
 600			   sizeof(map->grants[0]) * op.count) != 0) {
 601		gntdev_put_map(NULL, map);
 602		return -EFAULT;
 603	}
 604
 605	mutex_lock(&priv->lock);
 606	gntdev_add_map(priv, map);
 607	op.index = map->index << PAGE_SHIFT;
 608	mutex_unlock(&priv->lock);
 609
 610	if (copy_to_user(u, &op, sizeof(op)) != 0)
 611		return -EFAULT;
 612
 613	return 0;
 614}
 615
 616static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 617					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 618{
 619	struct ioctl_gntdev_unmap_grant_ref op;
 620	struct gntdev_grant_map *map;
 621	int err = -ENOENT;
 622
 623	if (copy_from_user(&op, u, sizeof(op)) != 0)
 624		return -EFAULT;
 625	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 626
 627	mutex_lock(&priv->lock);
 628	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 629	if (map) {
 630		list_del(&map->next);
 631		err = 0;
 632	}
 633	mutex_unlock(&priv->lock);
 634	if (map)
 635		gntdev_put_map(priv, map);
 636	return err;
 637}
 638
 639static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 640					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 641{
 642	struct ioctl_gntdev_get_offset_for_vaddr op;
 643	struct vm_area_struct *vma;
 644	struct gntdev_grant_map *map;
 645	int rv = -EINVAL;
 646
 647	if (copy_from_user(&op, u, sizeof(op)) != 0)
 648		return -EFAULT;
 649	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 650
 651	mmap_read_lock(current->mm);
 652	vma = find_vma(current->mm, op.vaddr);
 653	if (!vma || vma->vm_ops != &gntdev_vmops)
 654		goto out_unlock;
 655
 656	map = vma->vm_private_data;
 657	if (!map)
 658		goto out_unlock;
 659
 660	op.offset = map->index << PAGE_SHIFT;
 661	op.count = map->count;
 662	rv = 0;
 663
 664 out_unlock:
 665	mmap_read_unlock(current->mm);
 666
 667	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 668		return -EFAULT;
 669	return rv;
 670}
 671
 672static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 673{
 674	struct ioctl_gntdev_unmap_notify op;
 675	struct gntdev_grant_map *map;
 676	int rc;
 677	int out_flags;
 678	evtchn_port_t out_event;
 679
 680	if (copy_from_user(&op, u, sizeof(op)))
 681		return -EFAULT;
 682
 683	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
 684		return -EINVAL;
 685
 686	/* We need to grab a reference to the event channel we are going to use
 687	 * to send the notify before releasing the reference we may already
 688	 * have (if someone has called this ioctl twice). This makes it
 689	 * possible to change the clear_byte part of the notification without
 690	 * disturbing the event channel part, even if the existing notification
 691	 * holds the last reference to that event channel.
 692	 */
 693	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
 694		if (evtchn_get(op.event_channel_port))
 695			return -EINVAL;
 696	}
 697
 698	out_flags = op.action;
 699	out_event = op.event_channel_port;
 700
 701	mutex_lock(&priv->lock);
 702
 703	list_for_each_entry(map, &priv->maps, next) {
 704		uint64_t begin = map->index << PAGE_SHIFT;
 705		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
 706		if (op.index >= begin && op.index < end)
 707			goto found;
 708	}
 709	rc = -ENOENT;
 710	goto unlock_out;
 711
 712 found:
 713	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
 714			(map->flags & GNTMAP_readonly)) {
 715		rc = -EINVAL;
 716		goto unlock_out;
 717	}
 718
 719	out_flags = map->notify.flags;
 720	out_event = map->notify.event;
 721
 722	map->notify.flags = op.action;
 723	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
 724	map->notify.event = op.event_channel_port;
 725
 726	rc = 0;
 727
 728 unlock_out:
 729	mutex_unlock(&priv->lock);
 730
 731	/* Drop the reference to the event channel we did not save in the map */
 732	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
 733		evtchn_put(out_event);
 734
 735	return rc;
 736}
 737
 738#define GNTDEV_COPY_BATCH 16
 739
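/*
 * Scratch state for IOCTL_GNTDEV_GRANT_COPY: up to GNTDEV_COPY_BATCH
 * gnttab_copy ops are accumulated together with the pinned local page and
 * the user-space status pointer for each op, then flushed to the
 * hypervisor in a single gnttab_batch_copy() call by gntdev_copy().
 */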
 740struct gntdev_copy_batch {
 741	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
 742	struct page *pages[GNTDEV_COPY_BATCH];
 743	s16 __user *status[GNTDEV_COPY_BATCH];
 744	unsigned int nr_ops;
 745	unsigned int nr_pages;
 746	bool writeable;
 747};
 748
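/*
 * Pin the user page backing @virt (writably if the batch will store into
 * it), remember it for the later unpin in gntdev_put_pages(), and return
 * the guest frame number that the hypervisor expects in a gnttab_copy op.
 */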
 749static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 750				unsigned long *gfn)
 751{
 752	unsigned long addr = (unsigned long)virt;
 753	struct page *page;
 754	unsigned long xen_pfn;
 755	int ret;
 756
 757	ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
 758	if (ret < 0)
 759		return ret;
 760
 761	batch->pages[batch->nr_pages++] = page;
 762
 763	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
 764	*gfn = pfn_to_gfn(xen_pfn);
 765
 766	return 0;
 767}
 768
 769static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 770{
 771	unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
 772	batch->nr_pages = 0;
 773	batch->writeable = false;
 774}
 775
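/*
 * Flush the accumulated ops to the hypervisor, unpin the local pages, and
 * propagate the first failure of each segment to its user-visible status
 * word; a segment already marked as failed is left untouched.
 */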
 776static int gntdev_copy(struct gntdev_copy_batch *batch)
 777{
 778	unsigned int i;
 779
 780	gnttab_batch_copy(batch->ops, batch->nr_ops);
 781	gntdev_put_pages(batch);
 782
 783	/*
 784	 * For each completed op, update the status if the op failed
 785	 * and all previous ops for the segment were successful.
 786	 */
 787	for (i = 0; i < batch->nr_ops; i++) {
 788		s16 status = batch->ops[i].status;
 789		s16 old_status;
 790
 791		if (status == GNTST_okay)
 792			continue;
 793
 794		if (__get_user(old_status, batch->status[i]))
 795			return -EFAULT;
 796
 797		if (old_status != GNTST_okay)
 798			continue;
 799
 800		if (__put_user(status, batch->status[i]))
 801			return -EFAULT;
 802	}
 803
 804	batch->nr_ops = 0;
 805	return 0;
 806}
 807
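/*
 * Translate one user-supplied copy segment into hypervisor ops.  A segment
 * may expand into several ops because the local (virtual address) side is
 * handled one Xen page at a time; every op generated for the segment shares
 * the same user status pointer, which gntdev_copy() updates on failure.
 */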
 808static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
 809				 struct gntdev_grant_copy_segment *seg,
 810				 s16 __user *status)
 811{
 812	uint16_t copied = 0;
 813
 814	/*
 815	 * Disallow local -> local copies since there is only space in
 816	 * batch->pages for one page per-op and this would be a very
 817	 * expensive memcpy().
 818	 */
 819	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
 820		return -EINVAL;
 821
 822	/* Can't cross page if source/dest is a grant ref. */
 823	if (seg->flags & GNTCOPY_source_gref) {
 824		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
 825			return -EINVAL;
 826	}
 827	if (seg->flags & GNTCOPY_dest_gref) {
 828		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
 829			return -EINVAL;
 830	}
 831
 832	if (put_user(GNTST_okay, status))
 833		return -EFAULT;
 834
 835	while (copied < seg->len) {
 836		struct gnttab_copy *op;
 837		void __user *virt;
 838		size_t len, off;
 839		unsigned long gfn;
 840		int ret;
 841
 842		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
 843			ret = gntdev_copy(batch);
 844			if (ret < 0)
 845				return ret;
 846		}
 847
 848		len = seg->len - copied;
 849
 850		op = &batch->ops[batch->nr_ops];
 851		op->flags = 0;
 852
 853		if (seg->flags & GNTCOPY_source_gref) {
 854			op->source.u.ref = seg->source.foreign.ref;
 855			op->source.domid = seg->source.foreign.domid;
 856			op->source.offset = seg->source.foreign.offset + copied;
 857			op->flags |= GNTCOPY_source_gref;
 858		} else {
 859			virt = seg->source.virt + copied;
 860			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 861			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 862			batch->writeable = false;
 863
 864			ret = gntdev_get_page(batch, virt, &gfn);
 865			if (ret < 0)
 866				return ret;
 867
 868			op->source.u.gmfn = gfn;
 869			op->source.domid = DOMID_SELF;
 870			op->source.offset = off;
 871		}
 872
 873		if (seg->flags & GNTCOPY_dest_gref) {
 874			op->dest.u.ref = seg->dest.foreign.ref;
 875			op->dest.domid = seg->dest.foreign.domid;
 876			op->dest.offset = seg->dest.foreign.offset + copied;
 877			op->flags |= GNTCOPY_dest_gref;
 878		} else {
 879			virt = seg->dest.virt + copied;
 880			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 881			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 882			batch->writeable = true;
 883
 884			ret = gntdev_get_page(batch, virt, &gfn);
 885			if (ret < 0)
 886				return ret;
 887
 888			op->dest.u.gmfn = gfn;
 889			op->dest.domid = DOMID_SELF;
 890			op->dest.offset = off;
 891		}
 892
 893		op->len = len;
 894		copied += len;
 895
 896		batch->status[batch->nr_ops] = status;
 897		batch->nr_ops++;
 898	}
 899
 900	return 0;
 901}
 902
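/*
 * Illustrative user-space sketch, not part of this driver: copy len bytes
 * (at most one Xen page when reading a grant from offset 0) from a foreign
 * grant into a local buffer without mapping it.  Error handling is
 * omitted; per-segment results are reported in seg.status as GNTST_*
 * values.
 *
 *	struct gntdev_grant_copy_segment seg = {
 *		.source.foreign = { .ref = gref, .domid = domid, .offset = 0 },
 *		.dest.virt = buf,
 *		.len = len,
 *		.flags = GNTCOPY_source_gref,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = {
 *		.count = 1,
 *		.segments = &seg,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 */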
 903static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 904{
 905	struct ioctl_gntdev_grant_copy copy;
 906	struct gntdev_copy_batch batch;
 907	unsigned int i;
 908	int ret = 0;
 909
 910	if (copy_from_user(&copy, u, sizeof(copy)))
 911		return -EFAULT;
 912
 913	batch.nr_ops = 0;
 914	batch.nr_pages = 0;
 915
 916	for (i = 0; i < copy.count; i++) {
 917		struct gntdev_grant_copy_segment seg;
 918
 919		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
 920			ret = -EFAULT;
 921			goto out;
 922		}
 923
 924		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
 925		if (ret < 0)
 926			goto out;
 927
 928		cond_resched();
 929	}
 930	if (batch.nr_ops)
 931		ret = gntdev_copy(&batch);
 932	return ret;
 933
 934  out:
 935	gntdev_put_pages(&batch);
 936	return ret;
 937}
 938
 939static long gntdev_ioctl(struct file *flip,
 940			 unsigned int cmd, unsigned long arg)
 941{
 942	struct gntdev_priv *priv = flip->private_data;
 943	void __user *ptr = (void __user *)arg;
 944
 945	switch (cmd) {
 946	case IOCTL_GNTDEV_MAP_GRANT_REF:
 947		return gntdev_ioctl_map_grant_ref(priv, ptr);
 948
 949	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
 950		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
 951
 952	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
 953		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
 954
 955	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
 956		return gntdev_ioctl_notify(priv, ptr);
 957
 958	case IOCTL_GNTDEV_GRANT_COPY:
 959		return gntdev_ioctl_grant_copy(priv, ptr);
 960
 961#ifdef CONFIG_XEN_GNTDEV_DMABUF
 962	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
 963		return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
 964
 965	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
 966		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
 967
 968	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
 969		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
 970
 971	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
 972		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
 973#endif
 974
 975	default:
 976		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
 977		return -ENOIOCTLCMD;
 978	}
 979
 980	return 0;
 981}
 982
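/*
 * Back a shared mapping of the device with a map previously added by
 * IOCTL_GNTDEV_MAP_GRANT_REF: look the map up by vma->vm_pgoff, take an
 * extra reference, and either install the granted frames directly in the
 * page tables via the grant-map hypercall (PV, use_ptemod, with an MMU
 * interval notifier guarding the PTEs) or insert the ballooned backing
 * pages into the VMA with vm_map_pages_zero() (auto-translated guests).
 */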
 983static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 984{
 985	struct gntdev_priv *priv = flip->private_data;
 986	int index = vma->vm_pgoff;
 987	int count = vma_pages(vma);
 988	struct gntdev_grant_map *map;
 989	int err = -EINVAL;
 990
 991	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
 992		return -EINVAL;
 993
 994	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
 995			index, count, vma->vm_start, vma->vm_pgoff);
 996
 997	mutex_lock(&priv->lock);
 998	map = gntdev_find_map_index(priv, index, count);
 999	if (!map)
1000		goto unlock_out;
1001	if (use_ptemod && map->vma)
1002		goto unlock_out;
1003	refcount_inc(&map->users);
1004
1005	vma->vm_ops = &gntdev_vmops;
1006
1007	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
1008
1009	if (use_ptemod)
1010		vma->vm_flags |= VM_DONTCOPY;
1011
1012	vma->vm_private_data = map;
1013	if (map->flags) {
1014		if ((vma->vm_flags & VM_WRITE) &&
1015				(map->flags & GNTMAP_readonly))
1016			goto out_unlock_put;
1017	} else {
1018		map->flags = GNTMAP_host_map;
1019		if (!(vma->vm_flags & VM_WRITE))
1020			map->flags |= GNTMAP_readonly;
1021	}
1022
1023	if (use_ptemod) {
1024		map->vma = vma;
1025		err = mmu_interval_notifier_insert_locked(
1026			&map->notifier, vma->vm_mm, vma->vm_start,
1027			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
1028		if (err) {
1029			map->vma = NULL;
1030			goto out_unlock_put;
1031		}
1032	}
1033	mutex_unlock(&priv->lock);
1034
1035	if (use_ptemod) {
1036		/*
1037		 * gntdev takes the address of the PTE in find_grant_ptes() and
1038		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
1039		 * purpose of the notifier is to prevent the hypervisor pointer
1040		 * to the PTE from going stale.
1041		 *
1042		 * Since this vma's mappings can't be touched without the
1043		 * mmap_lock, and we are holding it now, there is no need for
1044		 * the notifier_range locking pattern.
1045		 */
1046		mmu_interval_read_begin(&map->notifier);
1047
1048		map->pages_vm_start = vma->vm_start;
1049		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1050					  vma->vm_end - vma->vm_start,
1051					  find_grant_ptes, map);
1052		if (err) {
1053			pr_warn("find_grant_ptes() failure.\n");
1054			goto out_put_map;
1055		}
1056	}
1057
1058	err = gntdev_map_grant_pages(map);
1059	if (err)
1060		goto out_put_map;
1061
1062	if (!use_ptemod) {
1063		err = vm_map_pages_zero(vma, map->pages, map->count);
1064		if (err)
1065			goto out_put_map;
1066	} else {
1067#ifdef CONFIG_X86
1068		/*
1069		 * If the PTEs were not made special by the grant map
1070		 * hypercall, do so here.
1071		 *
1072		 * This is racy since the mapping is already visible
1073		 * to userspace but userspace should be well-behaved
1074		 * enough to not touch it until the mmap() call
1075		 * returns.
1076		 */
1077		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
1078			apply_to_page_range(vma->vm_mm, vma->vm_start,
1079					    vma->vm_end - vma->vm_start,
1080					    set_grant_ptes_as_special, NULL);
1081		}
1082#endif
1083	}
1084
1085	return 0;
1086
1087unlock_out:
1088	mutex_unlock(&priv->lock);
1089	return err;
1090
1091out_unlock_put:
1092	mutex_unlock(&priv->lock);
1093out_put_map:
1094	if (use_ptemod) {
1095		unmap_grant_pages(map, 0, map->count);
1096		if (map->vma) {
1097			mmu_interval_notifier_remove(&map->notifier);
1098			map->vma = NULL;
1099		}
1100	}
1101	gntdev_put_map(priv, map);
1102	return err;
1103}
1104
1105static const struct file_operations gntdev_fops = {
1106	.owner = THIS_MODULE,
1107	.open = gntdev_open,
1108	.release = gntdev_release,
1109	.mmap = gntdev_mmap,
1110	.unlocked_ioctl = gntdev_ioctl
1111};
1112
1113static struct miscdevice gntdev_miscdev = {
1114	.minor        = MISC_DYNAMIC_MINOR,
1115	.name         = "xen/gntdev",
1116	.fops         = &gntdev_fops,
1117};
1118
1119/* ------------------------------------------------------------------ */
1120
1121static int __init gntdev_init(void)
1122{
1123	int err;
1124
1125	if (!xen_domain())
1126		return -ENODEV;
1127
1128	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
1129
1130	err = misc_register(&gntdev_miscdev);
1131	if (err != 0) {
1132		pr_err("Could not register gntdev device\n");
1133		return err;
1134	}
1135	return 0;
1136}
1137
1138static void __exit gntdev_exit(void)
1139{
1140	misc_deregister(&gntdev_miscdev);
1141}
1142
1143module_init(gntdev_init);
1144module_exit(gntdev_exit);
1145
1146/* ------------------------------------------------------------------ */