v5.9: drivers/xen/gntdev.c
   1/******************************************************************************
   2 * gntdev.c
   3 *
   4 * Device for accessing (in user-space) pages that have been granted by other
   5 * domains.
   6 *
   7 * Copyright (c) 2006-2007, D G Murray.
   8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
   9 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  19 */
  20
  21#undef DEBUG
  22
  23#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  24
  25#include <linux/dma-mapping.h>
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/init.h>
  29#include <linux/miscdevice.h>
  30#include <linux/fs.h>
  31#include <linux/uaccess.h>
  32#include <linux/sched.h>
  33#include <linux/sched/mm.h>
  34#include <linux/spinlock.h>
  35#include <linux/slab.h>
  36#include <linux/highmem.h>
  37#include <linux/refcount.h>
  38
  39#include <xen/xen.h>
  40#include <xen/grant_table.h>
  41#include <xen/balloon.h>
  42#include <xen/gntdev.h>
  43#include <xen/events.h>
  44#include <xen/page.h>
  45#include <asm/xen/hypervisor.h>
  46#include <asm/xen/hypercall.h>
  47
  48#include "gntdev-common.h"
  49#ifdef CONFIG_XEN_GNTDEV_DMABUF
  50#include "gntdev-dmabuf.h"
  51#endif
  52
  53MODULE_LICENSE("GPL");
  54MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
  55	      "Gerd Hoffmann <kraxel@redhat.com>");
  56MODULE_DESCRIPTION("User-space granted page access driver");
  57
  58static unsigned int limit = 64*1024;
  59module_param(limit, uint, 0644);
  60MODULE_PARM_DESC(limit,
  61	"Maximum number of grants that may be mapped by one mapping request");
  62
  63static int use_ptemod;
  64
  65static int unmap_grant_pages(struct gntdev_grant_map *map,
  66			     int offset, int pages);
  67
  68static struct miscdevice gntdev_miscdev;
  69
  70/* ------------------------------------------------------------------ */
  71
  72bool gntdev_test_page_count(unsigned int count)
  73{
  74	return !count || count > limit;
  75}
  76
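/*
 * Editor's note, not in the original file: despite the name,
 * gntdev_test_page_count() returns true when @count is *invalid* (zero or
 * above the "limit" module parameter); callers such as
 * gntdev_ioctl_map_grant_ref() turn a true result into -EINVAL.
 */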
  77static void gntdev_print_maps(struct gntdev_priv *priv,
  78			      char *text, int text_index)
  79{
  80#ifdef DEBUG
  81	struct gntdev_grant_map *map;
  82
  83	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
  84	list_for_each_entry(map, &priv->maps, next)
  85		pr_debug("  index %2d, count %2d %s\n",
  86		       map->index, map->count,
  87		       map->index == text_index && text ? text : "");
  88#endif
  89}
  90
  91static void gntdev_free_map(struct gntdev_grant_map *map)
  92{
  93	if (map == NULL)
  94		return;
  95
  96#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  97	if (map->dma_vaddr) {
  98		struct gnttab_dma_alloc_args args;
  99
 100		args.dev = map->dma_dev;
 101		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
 102		args.nr_pages = map->count;
 103		args.pages = map->pages;
 104		args.frames = map->frames;
 105		args.vaddr = map->dma_vaddr;
 106		args.dev_bus_addr = map->dma_bus_addr;
 107
 108		gnttab_dma_free_pages(&args);
 109	} else
 110#endif
 111	if (map->pages)
 112		gnttab_free_pages(map->count, map->pages);
 113
 114#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 115	kvfree(map->frames);
 116#endif
 117	kvfree(map->pages);
 118	kvfree(map->grants);
 119	kvfree(map->map_ops);
 120	kvfree(map->unmap_ops);
 121	kvfree(map->kmap_ops);
 122	kvfree(map->kunmap_ops);
 123	kfree(map);
 124}
 125
 126struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 127					  int dma_flags)
 128{
 129	struct gntdev_grant_map *add;
 130	int i;
 131
 132	add = kzalloc(sizeof(*add), GFP_KERNEL);
 133	if (NULL == add)
 134		return NULL;
 135
 136	add->grants    = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
 137	add->map_ops   = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
 138	add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
 139	add->kmap_ops  = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
 140	add->kunmap_ops = kvcalloc(count,
 141				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
 142	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 143	if (NULL == add->grants    ||
 144	    NULL == add->map_ops   ||
 145	    NULL == add->unmap_ops ||
 146	    NULL == add->kmap_ops  ||
 147	    NULL == add->kunmap_ops ||
 148	    NULL == add->pages)
 149		goto err;
 150
 151#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 152	add->dma_flags = dma_flags;
 153
 154	/*
 155	 * Check if this mapping is requested to be backed
 156	 * by a DMA buffer.
 157	 */
 158	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
 159		struct gnttab_dma_alloc_args args;
 160
 161		add->frames = kvcalloc(count, sizeof(add->frames[0]),
 162				       GFP_KERNEL);
 163		if (!add->frames)
 164			goto err;
 165
 166		/* Remember the device, so we can free DMA memory. */
 167		add->dma_dev = priv->dma_dev;
 168
 169		args.dev = priv->dma_dev;
 170		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
 171		args.nr_pages = count;
 172		args.pages = add->pages;
 173		args.frames = add->frames;
 174
 175		if (gnttab_dma_alloc_pages(&args))
 176			goto err;
 177
 178		add->dma_vaddr = args.vaddr;
 179		add->dma_bus_addr = args.dev_bus_addr;
 180	} else
 181#endif
 182	if (gnttab_alloc_pages(count, add->pages))
 183		goto err;
 184
 185	for (i = 0; i < count; i++) {
 186		add->map_ops[i].handle = -1;
 187		add->unmap_ops[i].handle = -1;
 188		add->kmap_ops[i].handle = -1;
 189		add->kunmap_ops[i].handle = -1;
 190	}
 191
 192	add->index = 0;
 193	add->count = count;
 194	refcount_set(&add->users, 1);
 195
 196	return add;
 197
 198err:
 199	gntdev_free_map(add);
 200	return NULL;
 201}
 202
 203void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
 204{
 205	struct gntdev_grant_map *map;
 206
 207	list_for_each_entry(map, &priv->maps, next) {
 208		if (add->index + add->count < map->index) {
 209			list_add_tail(&add->next, &map->next);
 210			goto done;
 211		}
 212		add->index = map->index + map->count;
 213	}
 214	list_add_tail(&add->next, &priv->maps);
 215
 216done:
 217	gntdev_print_maps(priv, "[new]", add->index);
 218}
 219
 220static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 221						      int index, int count)
 222{
 223	struct gntdev_grant_map *map;
 224
 225	list_for_each_entry(map, &priv->maps, next) {
 226		if (map->index != index)
 227			continue;
 228		if (count && map->count != count)
 229			continue;
 230		return map;
 231	}
 232	return NULL;
 233}
 234
 235void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 236{
 237	if (!map)
 238		return;
 239
 240	if (!refcount_dec_and_test(&map->users))
 241		return;
 242
 243	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 244		notify_remote_via_evtchn(map->notify.event);
 245		evtchn_put(map->notify.event);
 246	}
 247
 248	if (map->pages && !use_ptemod)
 249		unmap_grant_pages(map, 0, map->count);
 250	gntdev_free_map(map);
 251}
 252
 253/* ------------------------------------------------------------------ */
 254
 255static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 256{
 257	struct gntdev_grant_map *map = data;
 258	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
 259	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
 260	u64 pte_maddr;
 261
 262	BUG_ON(pgnr >= map->count);
 263	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 264
 265	/*
 266	 * Set the PTE as special to force get_user_pages_fast() fall
 267	 * back to the slow path.  If this is not supported as part of
 268	 * the grant map, it will be done afterwards.
 269	 */
 270	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
 271		flags |= (1 << _GNTMAP_guest_avail0);
 272
 273	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 274			  map->grants[pgnr].ref,
 275			  map->grants[pgnr].domid);
 276	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 277			    -1 /* handle */);
 278	return 0;
 279}
 280
 281#ifdef CONFIG_X86
 282static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
 283{
 284	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
 285	return 0;
 286}
 287#endif
 288
 289int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 290{
 291	int i, err = 0;
 292
 293	if (!use_ptemod) {
 294		/* Note: it could already be mapped */
 295		if (map->map_ops[0].handle != -1)
 296			return 0;
 297		for (i = 0; i < map->count; i++) {
 298			unsigned long addr = (unsigned long)
 299				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 300			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
 301				map->grants[i].ref,
 302				map->grants[i].domid);
 303			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
 304				map->flags, -1 /* handle */);
 305		}
 306	} else {
 307		/*
 308		 * Setup the map_ops corresponding to the pte entries pointing
 309		 * to the kernel linear addresses of the struct pages.
 310		 * These ptes are completely different from the user ptes dealt
 311		 * with find_grant_ptes.
 312		 */
 313		for (i = 0; i < map->count; i++) {
 314			unsigned long address = (unsigned long)
 315				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 316			BUG_ON(PageHighMem(map->pages[i]));
 317
 318			gnttab_set_map_op(&map->kmap_ops[i], address,
 319				map->flags | GNTMAP_host_map,
 320				map->grants[i].ref,
 321				map->grants[i].domid);
 322			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
 323				map->flags | GNTMAP_host_map, -1);
 324		}
 325	}
 326
 327	pr_debug("map %d+%d\n", map->index, map->count);
 328	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
 329			map->pages, map->count);
 330	if (err)
 331		return err;
 332
 333	for (i = 0; i < map->count; i++) {
 334		if (map->map_ops[i].status) {
 335			err = -EINVAL;
 336			continue;
 337		}
 338
 339		map->unmap_ops[i].handle = map->map_ops[i].handle;
 340		if (use_ptemod)
 341			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
 342#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 343		else if (map->dma_vaddr) {
 344			unsigned long bfn;
 345
 346			bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
 347			map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
 348		}
 349#endif
 350	}
 351	return err;
 352}
 353
 354static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 355			       int pages)
 356{
 357	int i, err = 0;
 358	struct gntab_unmap_queue_data unmap_data;
 359
 360	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 361		int pgno = (map->notify.addr >> PAGE_SHIFT);
 362		if (pgno >= offset && pgno < offset + pages) {
 363			/* No need for kmap, pages are in lowmem */
 364			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 365			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 366			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 367		}
 368	}
 369
 370	unmap_data.unmap_ops = map->unmap_ops + offset;
 371	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
 372	unmap_data.pages = map->pages + offset;
 373	unmap_data.count = pages;
 374
 375	err = gnttab_unmap_refs_sync(&unmap_data);
 376	if (err)
 377		return err;
 378
 379	for (i = 0; i < pages; i++) {
 380		if (map->unmap_ops[offset+i].status)
 381			err = -EINVAL;
 382		pr_debug("unmap handle=%d st=%d\n",
 383			map->unmap_ops[offset+i].handle,
 384			map->unmap_ops[offset+i].status);
 385		map->unmap_ops[offset+i].handle = -1;
 386	}
 387	return err;
 388}
 389
 390static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
 391			     int pages)
 392{
 393	int range, err = 0;
 394
 395	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 396
 397	/* It is possible the requested range will have a "hole" where we
 398	 * already unmapped some of the grants. Only unmap valid ranges.
 399	 */
 400	while (pages && !err) {
 401		while (pages && map->unmap_ops[offset].handle == -1) {
 402			offset++;
 403			pages--;
 404		}
 405		range = 0;
 406		while (range < pages) {
 407			if (map->unmap_ops[offset+range].handle == -1)
 408				break;
 409			range++;
 410		}
 411		err = __unmap_grant_pages(map, offset, range);
 412		offset += range;
 413		pages -= range;
 414	}
 415
 416	return err;
 417}
 418
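/*
 * Editor's note, not in the original file: a worked example of the
 * hole-skipping loop above. With handles {-1, h1, h2, -1, h3}, offset=0
 * and pages=5, index 0 is skipped, __unmap_grant_pages() covers [1,3),
 * index 3 is skipped, and a second call covers [4,5): two unmap batches
 * in total.
 */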
 419/* ------------------------------------------------------------------ */
 420
 421static void gntdev_vma_open(struct vm_area_struct *vma)
 422{
 423	struct gntdev_grant_map *map = vma->vm_private_data;
 424
 425	pr_debug("gntdev_vma_open %p\n", vma);
 426	refcount_inc(&map->users);
 427}
 428
 429static void gntdev_vma_close(struct vm_area_struct *vma)
 430{
 431	struct gntdev_grant_map *map = vma->vm_private_data;
 432	struct file *file = vma->vm_file;
 433	struct gntdev_priv *priv = file->private_data;
 434
 435	pr_debug("gntdev_vma_close %p\n", vma);
 436	if (use_ptemod) {
 437		WARN_ON(map->vma != vma);
 438		mmu_interval_notifier_remove(&map->notifier);
 439		map->vma = NULL;
 440	}
 441	vma->vm_private_data = NULL;
 442	gntdev_put_map(priv, map);
 443}
 444
 445static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 446						 unsigned long addr)
 447{
 448	struct gntdev_grant_map *map = vma->vm_private_data;
 449
 450	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 451}
 452
 453static const struct vm_operations_struct gntdev_vmops = {
 454	.open = gntdev_vma_open,
 455	.close = gntdev_vma_close,
 456	.find_special_page = gntdev_vma_find_special_page,
 457};
 458
 459/* ------------------------------------------------------------------ */
 460
 461static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 462			      const struct mmu_notifier_range *range,
 463			      unsigned long cur_seq)
 464{
 465	struct gntdev_grant_map *map =
 466		container_of(mn, struct gntdev_grant_map, notifier);
 467	unsigned long mstart, mend;
 468	int err;
 469
 470	if (!mmu_notifier_range_blockable(range))
 471		return false;
 472
 473	/*
 474	 * If the VMA is split or otherwise changed the notifier is not
 475	 * updated, but we don't want to process VA's outside the modified
 476	 * VMA. FIXME: It would be much more understandable to just prevent
 477	 * modifying the VMA in the first place.
 478	 */
 479	if (map->vma->vm_start >= range->end ||
 480	    map->vma->vm_end <= range->start)
 481		return true;
 482
 483	mstart = max(range->start, map->vma->vm_start);
 484	mend = min(range->end, map->vma->vm_end);
 485	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 486			map->index, map->count,
 487			map->vma->vm_start, map->vma->vm_end,
 488			range->start, range->end, mstart, mend);
 489	err = unmap_grant_pages(map,
 490				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 491				(mend - mstart) >> PAGE_SHIFT);
 492	WARN_ON(err);
 493
 494	return true;
 495}
 496
 497static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
 498	.invalidate = gntdev_invalidate,
 499};
 500
 501/* ------------------------------------------------------------------ */
 502
 503static int gntdev_open(struct inode *inode, struct file *flip)
 504{
 505	struct gntdev_priv *priv;
 506
 507	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 508	if (!priv)
 509		return -ENOMEM;
 510
 511	INIT_LIST_HEAD(&priv->maps);
 512	mutex_init(&priv->lock);
 513
 514#ifdef CONFIG_XEN_GNTDEV_DMABUF
 515	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
 516	if (IS_ERR(priv->dmabuf_priv)) {
 517		int ret = PTR_ERR(priv->dmabuf_priv);
 518
 519		kfree(priv);
 520		return ret;
 521	}
 522#endif
 523
 524	flip->private_data = priv;
 525#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 526	priv->dma_dev = gntdev_miscdev.this_device;
 527	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
 528#endif
 529	pr_debug("priv %p\n", priv);
 530
 531	return 0;
 532}
 533
 534static int gntdev_release(struct inode *inode, struct file *flip)
 535{
 536	struct gntdev_priv *priv = flip->private_data;
 537	struct gntdev_grant_map *map;
 538
 539	pr_debug("priv %p\n", priv);
 540
 541	mutex_lock(&priv->lock);
 542	while (!list_empty(&priv->maps)) {
 543		map = list_entry(priv->maps.next,
 544				 struct gntdev_grant_map, next);
 545		list_del(&map->next);
 546		gntdev_put_map(NULL /* already removed */, map);
 547	}
 548	mutex_unlock(&priv->lock);
 549
 550#ifdef CONFIG_XEN_GNTDEV_DMABUF
 551	gntdev_dmabuf_fini(priv->dmabuf_priv);
 552#endif
 553
 554	kfree(priv);
 555	return 0;
 556}
 557
 558static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 559				       struct ioctl_gntdev_map_grant_ref __user *u)
 560{
 561	struct ioctl_gntdev_map_grant_ref op;
 562	struct gntdev_grant_map *map;
 563	int err;
 564
 565	if (copy_from_user(&op, u, sizeof(op)) != 0)
 566		return -EFAULT;
 567	pr_debug("priv %p, add %d\n", priv, op.count);
 568	if (unlikely(gntdev_test_page_count(op.count)))
 569		return -EINVAL;
 570
 571	err = -ENOMEM;
 572	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
 573	if (!map)
 574		return err;
 575
 576	if (copy_from_user(map->grants, &u->refs,
 577			   sizeof(map->grants[0]) * op.count) != 0) {
 578		gntdev_put_map(NULL, map);
 579		return -EFAULT;
 580	}
 581
 582	mutex_lock(&priv->lock);
 583	gntdev_add_map(priv, map);
 584	op.index = map->index << PAGE_SHIFT;
 585	mutex_unlock(&priv->lock);
 586
 587	if (copy_to_user(u, &op, sizeof(op)) != 0)
 588		return -EFAULT;
 589
 590	return 0;
 591}
 592
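/*
 * Editor's sketch, not part of gntdev.c: a minimal user-space caller for
 * the ioctl above, assuming the exported UAPI header <xen/gntdev.h> and a
 * 4096-byte page; the helper name is illustrative and error handling is
 * pared down.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntdev.h>

/* fd comes from open("/dev/xen/gntdev", O_RDWR) */
static void *map_foreign_page(int fd, uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref op;

	memset(&op, 0, sizeof(op));
	op.count = 1;			/* one grant reference */
	op.refs[0].domid = domid;	/* granting domain */
	op.refs[0].ref = ref;		/* its grant reference */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op))
		return MAP_FAILED;

	/*
	 * op.index comes back as a byte offset (map->index << PAGE_SHIFT
	 * above); writable mappings must be MAP_SHARED because
	 * gntdev_mmap() rejects VM_WRITE without VM_SHARED.
	 */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)op.index);
}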
 593static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 594					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 595{
 596	struct ioctl_gntdev_unmap_grant_ref op;
 597	struct gntdev_grant_map *map;
 598	int err = -ENOENT;
 599
 600	if (copy_from_user(&op, u, sizeof(op)) != 0)
 601		return -EFAULT;
 602	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 603
 604	mutex_lock(&priv->lock);
 605	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 606	if (map) {
 607		list_del(&map->next);
 608		err = 0;
 609	}
 610	mutex_unlock(&priv->lock);
 611	if (map)
 612		gntdev_put_map(priv, map);
 613	return err;
 614}
 615
 616static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 617					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 618{
 619	struct ioctl_gntdev_get_offset_for_vaddr op;
 620	struct vm_area_struct *vma;
 621	struct gntdev_grant_map *map;
 622	int rv = -EINVAL;
 623
 624	if (copy_from_user(&op, u, sizeof(op)) != 0)
 625		return -EFAULT;
 626	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 627
 628	mmap_read_lock(current->mm);
 629	vma = find_vma(current->mm, op.vaddr);
 630	if (!vma || vma->vm_ops != &gntdev_vmops)
 631		goto out_unlock;
 632
 633	map = vma->vm_private_data;
 634	if (!map)
 635		goto out_unlock;
 636
 637	op.offset = map->index << PAGE_SHIFT;
 638	op.count = map->count;
 639	rv = 0;
 640
 641 out_unlock:
 642	mmap_read_unlock(current->mm);
 643
 644	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 645		return -EFAULT;
 646	return rv;
 647}
 648
 649static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 650{
 651	struct ioctl_gntdev_unmap_notify op;
 652	struct gntdev_grant_map *map;
 653	int rc;
 654	int out_flags;
 655	evtchn_port_t out_event;
 656
 657	if (copy_from_user(&op, u, sizeof(op)))
 658		return -EFAULT;
 659
 660	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
 661		return -EINVAL;
 662
 663	/* We need to grab a reference to the event channel we are going to use
 664	 * to send the notify before releasing the reference we may already have
 665	 * (if someone has called this ioctl twice). This is required so that
 666	 * it is possible to change the clear_byte part of the notification
 667	 * without disturbing the event channel part, which may now be the last
 668	 * reference to that event channel.
 669	 */
 670	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
 671		if (evtchn_get(op.event_channel_port))
 672			return -EINVAL;
 673	}
 674
 675	out_flags = op.action;
 676	out_event = op.event_channel_port;
 677
 678	mutex_lock(&priv->lock);
 679
 680	list_for_each_entry(map, &priv->maps, next) {
 681		uint64_t begin = map->index << PAGE_SHIFT;
 682		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
 683		if (op.index >= begin && op.index < end)
 684			goto found;
 685	}
 686	rc = -ENOENT;
 687	goto unlock_out;
 688
 689 found:
 690	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
 691			(map->flags & GNTMAP_readonly)) {
 692		rc = -EINVAL;
 693		goto unlock_out;
 694	}
 695
 696	out_flags = map->notify.flags;
 697	out_event = map->notify.event;
 698
 699	map->notify.flags = op.action;
 700	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
 701	map->notify.event = op.event_channel_port;
 702
 703	rc = 0;
 704
 705 unlock_out:
 706	mutex_unlock(&priv->lock);
 707
 708	/* Drop the reference to the event channel we did not save in the map */
 709	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
 710		evtchn_put(out_event);
 711
 712	return rc;
 713}
 714
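/*
 * Editor's sketch, not part of gntdev.c: arming an unmap notification from
 * user space after the map ioctl above, again assuming <xen/gntdev.h>.
 * Values are illustrative; op.index is the offset returned by
 * IOCTL_GNTDEV_MAP_GRANT_REF.
 */
struct ioctl_gntdev_unmap_notify notify = {
	.index	= op.index + 4095,	/* watch the last byte of the page */
	.action	= UNMAP_NOTIFY_CLEAR_BYTE,
	/* or UNMAP_NOTIFY_SEND_EVENT with .event_channel_port set */
};
int rc = ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
/* rc is -1 (EINVAL) for a clear-byte request on a read-only mapping */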
 715#define GNTDEV_COPY_BATCH 16
 716
 717struct gntdev_copy_batch {
 718	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
 719	struct page *pages[GNTDEV_COPY_BATCH];
 720	s16 __user *status[GNTDEV_COPY_BATCH];
 721	unsigned int nr_ops;
 722	unsigned int nr_pages;
 723};
 724
 725static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 726			   bool writeable, unsigned long *gfn)
 727{
 728	unsigned long addr = (unsigned long)virt;
 729	struct page *page;
 730	unsigned long xen_pfn;
 731	int ret;
 732
 733	ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
 734	if (ret < 0)
 735		return ret;
 736
 737	batch->pages[batch->nr_pages++] = page;
 738
 739	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
 740	*gfn = pfn_to_gfn(xen_pfn);
 741
 742	return 0;
 743}
 744
 745static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 746{
 747	unsigned int i;
 748
 749	for (i = 0; i < batch->nr_pages; i++)
 750		put_page(batch->pages[i]);
 751	batch->nr_pages = 0;
 752}
 753
 754static int gntdev_copy(struct gntdev_copy_batch *batch)
 755{
 756	unsigned int i;
 757
 758	gnttab_batch_copy(batch->ops, batch->nr_ops);
 759	gntdev_put_pages(batch);
 760
 761	/*
 762	 * For each completed op, update the status if the op failed
 763	 * and all previous ops for the segment were successful.
 764	 */
 765	for (i = 0; i < batch->nr_ops; i++) {
 766		s16 status = batch->ops[i].status;
 767		s16 old_status;
 768
 769		if (status == GNTST_okay)
 770			continue;
 771
 772		if (__get_user(old_status, batch->status[i]))
 773			return -EFAULT;
 774
 775		if (old_status != GNTST_okay)
 776			continue;
 777
 778		if (__put_user(status, batch->status[i]))
 779			return -EFAULT;
 780	}
 781
 782	batch->nr_ops = 0;
 783	return 0;
 784}
 785
 786static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
 787				 struct gntdev_grant_copy_segment *seg,
 788				 s16 __user *status)
 789{
 790	uint16_t copied = 0;
 791
 792	/*
 793	 * Disallow local -> local copies since there is only space in
 794	 * batch->pages for one page per-op and this would be a very
 795	 * expensive memcpy().
 796	 */
 797	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
 798		return -EINVAL;
 799
 800	/* Can't cross page if source/dest is a grant ref. */
 801	if (seg->flags & GNTCOPY_source_gref) {
 802		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
 803			return -EINVAL;
 804	}
 805	if (seg->flags & GNTCOPY_dest_gref) {
 806		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
 807			return -EINVAL;
 808	}
 809
 810	if (put_user(GNTST_okay, status))
 811		return -EFAULT;
 812
 813	while (copied < seg->len) {
 814		struct gnttab_copy *op;
 815		void __user *virt;
 816		size_t len, off;
 817		unsigned long gfn;
 818		int ret;
 819
 820		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
 821			ret = gntdev_copy(batch);
 822			if (ret < 0)
 823				return ret;
 824		}
 825
 826		len = seg->len - copied;
 827
 828		op = &batch->ops[batch->nr_ops];
 829		op->flags = 0;
 830
 831		if (seg->flags & GNTCOPY_source_gref) {
 832			op->source.u.ref = seg->source.foreign.ref;
 833			op->source.domid = seg->source.foreign.domid;
 834			op->source.offset = seg->source.foreign.offset + copied;
 835			op->flags |= GNTCOPY_source_gref;
 836		} else {
 837			virt = seg->source.virt + copied;
 838			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 839			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 840
 841			ret = gntdev_get_page(batch, virt, false, &gfn);
 842			if (ret < 0)
 843				return ret;
 844
 845			op->source.u.gmfn = gfn;
 846			op->source.domid = DOMID_SELF;
 847			op->source.offset = off;
 848		}
 849
 850		if (seg->flags & GNTCOPY_dest_gref) {
 851			op->dest.u.ref = seg->dest.foreign.ref;
 852			op->dest.domid = seg->dest.foreign.domid;
 853			op->dest.offset = seg->dest.foreign.offset + copied;
 854			op->flags |= GNTCOPY_dest_gref;
 855		} else {
 856			virt = seg->dest.virt + copied;
 857			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 858			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 859
 860			ret = gntdev_get_page(batch, virt, true, &gfn);
 861			if (ret < 0)
 862				return ret;
 863
 864			op->dest.u.gmfn = gfn;
 865			op->dest.domid = DOMID_SELF;
 866			op->dest.offset = off;
 867		}
 868
 869		op->len = len;
 870		copied += len;
 871
 872		batch->status[batch->nr_ops] = status;
 873		batch->nr_ops++;
 874	}
 875
 876	return 0;
 877}
 878
 879static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 880{
 881	struct ioctl_gntdev_grant_copy copy;
 882	struct gntdev_copy_batch batch;
 883	unsigned int i;
 884	int ret = 0;
 885
 886	if (copy_from_user(&copy, u, sizeof(copy)))
 887		return -EFAULT;
 888
 889	batch.nr_ops = 0;
 890	batch.nr_pages = 0;
 891
 892	for (i = 0; i < copy.count; i++) {
 893		struct gntdev_grant_copy_segment seg;
 894
 895		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
 896			ret = -EFAULT;
 897			goto out;
 898		}
 899
 900		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
 901		if (ret < 0)
 902			goto out;
 903
 904		cond_resched();
 905	}
 906	if (batch.nr_ops)
 907		ret = gntdev_copy(&batch);
 908	return ret;
 909
 910  out:
 911	gntdev_put_pages(&batch);
 912	return ret;
 913}
 914
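/*
 * Editor's sketch, not part of gntdev.c: one grant-copy segment pulling a
 * page granted as @domid/@ref into a local buffer @buf, using the UAPI
 * structures seen above (GNTCOPY_* and GNTST_* come from Xen's
 * grant_table.h); 4096 stands in for XEN_PAGE_SIZE.
 */
struct gntdev_grant_copy_segment seg;
struct ioctl_gntdev_grant_copy copy;

memset(&seg, 0, sizeof(seg));
seg.flags = GNTCOPY_source_gref;	/* local -> local is rejected */
seg.source.foreign.ref = ref;
seg.source.foreign.domid = domid;
seg.source.foreign.offset = 0;		/* offset + len must stay in-page */
seg.dest.virt = buf;
seg.len = 4096;

copy.count = 1;
copy.segments = &seg;
if (ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy) == 0 &&
    seg.status == GNTST_okay) {
	/* buf now holds the page contents */
}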
 915static long gntdev_ioctl(struct file *flip,
 916			 unsigned int cmd, unsigned long arg)
 917{
 918	struct gntdev_priv *priv = flip->private_data;
 919	void __user *ptr = (void __user *)arg;
 920
 921	switch (cmd) {
 922	case IOCTL_GNTDEV_MAP_GRANT_REF:
 923		return gntdev_ioctl_map_grant_ref(priv, ptr);
 924
 925	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
 926		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
 927
 928	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
 929		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
 930
 931	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
 932		return gntdev_ioctl_notify(priv, ptr);
 933
 934	case IOCTL_GNTDEV_GRANT_COPY:
 935		return gntdev_ioctl_grant_copy(priv, ptr);
 936
 937#ifdef CONFIG_XEN_GNTDEV_DMABUF
 938	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
 939		return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
 940
 941	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
 942		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
 943
 944	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
 945		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
 946
 947	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
 948		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
 949#endif
 950
 951	default:
 952		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
 953		return -ENOIOCTLCMD;
 954	}
 955
 956	return 0;
 957}
 958
 959static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 960{
 961	struct gntdev_priv *priv = flip->private_data;
 962	int index = vma->vm_pgoff;
 963	int count = vma_pages(vma);
 964	struct gntdev_grant_map *map;
 965	int err = -EINVAL;
 966
 967	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
 968		return -EINVAL;
 969
 970	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
 971			index, count, vma->vm_start, vma->vm_pgoff);
 972
 973	mutex_lock(&priv->lock);
 974	map = gntdev_find_map_index(priv, index, count);
 975	if (!map)
 976		goto unlock_out;
 977	if (use_ptemod && map->vma)
 978		goto unlock_out;
 979	refcount_inc(&map->users);
 980
 981	vma->vm_ops = &gntdev_vmops;
 982
 983	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
 984
 985	if (use_ptemod)
 986		vma->vm_flags |= VM_DONTCOPY;
 987
 988	vma->vm_private_data = map;
 989	if (map->flags) {
 990		if ((vma->vm_flags & VM_WRITE) &&
 991				(map->flags & GNTMAP_readonly))
 992			goto out_unlock_put;
 993	} else {
 994		map->flags = GNTMAP_host_map;
 995		if (!(vma->vm_flags & VM_WRITE))
 996			map->flags |= GNTMAP_readonly;
 997	}
 998
 999	if (use_ptemod) {
1000		map->vma = vma;
1001		err = mmu_interval_notifier_insert_locked(
1002			&map->notifier, vma->vm_mm, vma->vm_start,
1003			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
1004		if (err)
1005			goto out_unlock_put;
1006	}
1007	mutex_unlock(&priv->lock);
1008
1009	if (use_ptemod) {
1010		/*
1011		 * gntdev takes the address of the PTE in find_grant_ptes() and
1012		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
1013		 * purpose of the notifier is to prevent the hypervisor pointer
1014		 * to the PTE from going stale.
1015		 *
1016		 * Since this vma's mappings can't be touched without the
1017		 * mmap_lock, and we are holding it now, there is no need for
1018		 * the notifier_range locking pattern.
1019		 */
1020		mmu_interval_read_begin(&map->notifier);
1021
1022		map->pages_vm_start = vma->vm_start;
1023		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1024					  vma->vm_end - vma->vm_start,
1025					  find_grant_ptes, map);
1026		if (err) {
1027			pr_warn("find_grant_ptes() failure.\n");
1028			goto out_put_map;
1029		}
1030	}
1031
1032	err = gntdev_map_grant_pages(map);
1033	if (err)
1034		goto out_put_map;
1035
1036	if (!use_ptemod) {
1037		err = vm_map_pages_zero(vma, map->pages, map->count);
1038		if (err)
1039			goto out_put_map;
1040	} else {
1041#ifdef CONFIG_X86
1042		/*
1043		 * If the PTEs were not made special by the grant map
1044		 * hypercall, do so here.
1045		 *
1046		 * This is racy since the mapping is already visible
1047		 * to userspace but userspace should be well-behaved
1048		 * enough to not touch it until the mmap() call
1049		 * returns.
1050		 */
1051		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
1052			apply_to_page_range(vma->vm_mm, vma->vm_start,
1053					    vma->vm_end - vma->vm_start,
1054					    set_grant_ptes_as_special, NULL);
1055		}
1056#endif
1057	}
1058
1059	return 0;
1060
1061unlock_out:
1062	mutex_unlock(&priv->lock);
1063	return err;
1064
1065out_unlock_put:
1066	mutex_unlock(&priv->lock);
1067out_put_map:
1068	if (use_ptemod) {
1069		unmap_grant_pages(map, 0, map->count);
1070		if (map->vma) {
1071			mmu_interval_notifier_remove(&map->notifier);
1072			map->vma = NULL;
1073		}
1074	}
1075	gntdev_put_map(priv, map);
1076	return err;
1077}
1078
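/*
 * Editor's note, not in the original file: the offset user space passes to
 * mmap() is the index returned by IOCTL_GNTDEV_MAP_GRANT_REF (a byte
 * offset, seen here as vm_pgoff in pages), the length must cover exactly
 * the count that was mapped (gntdev_find_map_index() requires an exact
 * match), and writable mappings must be MAP_SHARED per the
 * VM_WRITE/VM_SHARED check at the top of this function.
 */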
1079static const struct file_operations gntdev_fops = {
1080	.owner = THIS_MODULE,
1081	.open = gntdev_open,
1082	.release = gntdev_release,
1083	.mmap = gntdev_mmap,
1084	.unlocked_ioctl = gntdev_ioctl
1085};
1086
1087static struct miscdevice gntdev_miscdev = {
1088	.minor        = MISC_DYNAMIC_MINOR,
1089	.name         = "xen/gntdev",
1090	.fops         = &gntdev_fops,
1091};
1092
1093/* ------------------------------------------------------------------ */
1094
1095static int __init gntdev_init(void)
1096{
1097	int err;
1098
1099	if (!xen_domain())
1100		return -ENODEV;
1101
1102	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
1103
1104	err = misc_register(&gntdev_miscdev);
1105	if (err != 0) {
1106		pr_err("Could not register gntdev device\n");
1107		return err;
1108	}
1109	return 0;
1110}
1111
1112static void __exit gntdev_exit(void)
1113{
1114	misc_deregister(&gntdev_miscdev);
1115}
1116
1117module_init(gntdev_init);
1118module_exit(gntdev_exit);
1119
1120/* ------------------------------------------------------------------ */
v4.6: drivers/xen/gntdev.c
   1/******************************************************************************
   2 * gntdev.c
   3 *
   4 * Device for accessing (in user-space) pages that have been granted by other
   5 * domains.
   6 *
   7 * Copyright (c) 2006-2007, D G Murray.
   8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#undef DEBUG
  21
  22#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  23
  24#include <linux/module.h>
  25#include <linux/kernel.h>
  26#include <linux/init.h>
  27#include <linux/miscdevice.h>
  28#include <linux/fs.h>
  29#include <linux/mm.h>
  30#include <linux/mman.h>
  31#include <linux/mmu_notifier.h>
  32#include <linux/types.h>
  33#include <linux/uaccess.h>
  34#include <linux/sched.h>
  35#include <linux/spinlock.h>
  36#include <linux/slab.h>
  37#include <linux/highmem.h>
  38
  39#include <xen/xen.h>
  40#include <xen/grant_table.h>
  41#include <xen/balloon.h>
  42#include <xen/gntdev.h>
  43#include <xen/events.h>
  44#include <xen/page.h>
  45#include <asm/xen/hypervisor.h>
  46#include <asm/xen/hypercall.h>
  47
  48MODULE_LICENSE("GPL");
  49MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
  50	      "Gerd Hoffmann <kraxel@redhat.com>");
  51MODULE_DESCRIPTION("User-space granted page access driver");
  52
  53static int limit = 1024*1024;
  54module_param(limit, int, 0644);
  55MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
  56		"the gntdev device");
  57
  58static atomic_t pages_mapped = ATOMIC_INIT(0);
  59
  60static int use_ptemod;
  61#define populate_freeable_maps use_ptemod
  62
  63struct gntdev_priv {
  64	/* maps with visible offsets in the file descriptor */
  65	struct list_head maps;
  66	/* maps that are not visible; will be freed on munmap.
  67	 * Only populated if populate_freeable_maps == 1 */
  68	struct list_head freeable_maps;
  69	/* lock protects maps and freeable_maps */
  70	struct mutex lock;
  71	struct mm_struct *mm;
  72	struct mmu_notifier mn;
  73};
  74
  75struct unmap_notify {
  76	int flags;
  77	/* Address relative to the start of the grant_map */
  78	int addr;
  79	int event;
  80};
  81
  82struct grant_map {
  83	struct list_head next;
  84	struct vm_area_struct *vma;
  85	int index;
  86	int count;
  87	int flags;
  88	atomic_t users;
  89	struct unmap_notify notify;
  90	struct ioctl_gntdev_grant_ref *grants;
  91	struct gnttab_map_grant_ref   *map_ops;
  92	struct gnttab_unmap_grant_ref *unmap_ops;
  93	struct gnttab_map_grant_ref   *kmap_ops;
  94	struct gnttab_unmap_grant_ref *kunmap_ops;
  95	struct page **pages;
  96	unsigned long pages_vm_start;
  97};
  98
  99static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
 100
 101/* ------------------------------------------------------------------ */
 102
 103static void gntdev_print_maps(struct gntdev_priv *priv,
 104			      char *text, int text_index)
 105{
 106#ifdef DEBUG
 107	struct grant_map *map;
 108
 109	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
 110	list_for_each_entry(map, &priv->maps, next)
 111		pr_debug("  index %2d, count %2d %s\n",
 112		       map->index, map->count,
 113		       map->index == text_index && text ? text : "");
 114#endif
 115}
 116
 117static void gntdev_free_map(struct grant_map *map)
 118{
 119	if (map == NULL)
 120		return;
 121
 122	if (map->pages)
 123		gnttab_free_pages(map->count, map->pages);
 124	kfree(map->pages);
 125	kfree(map->grants);
 126	kfree(map->map_ops);
 127	kfree(map->unmap_ops);
 128	kfree(map->kmap_ops);
 129	kfree(map->kunmap_ops);
 130	kfree(map);
 131}
 132
 133static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 134{
 135	struct grant_map *add;
 136	int i;
 137
 138	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
 139	if (NULL == add)
 140		return NULL;
 141
 142	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
 143	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
 144	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
 145	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
 146	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
 147	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 148	if (NULL == add->grants    ||
 149	    NULL == add->map_ops   ||
 150	    NULL == add->unmap_ops ||
 151	    NULL == add->kmap_ops  ||
 152	    NULL == add->kunmap_ops ||
 153	    NULL == add->pages)
 154		goto err;
 155
 156	if (gnttab_alloc_pages(count, add->pages))
 157		goto err;
 158
 159	for (i = 0; i < count; i++) {
 160		add->map_ops[i].handle = -1;
 161		add->unmap_ops[i].handle = -1;
 162		add->kmap_ops[i].handle = -1;
 163		add->kunmap_ops[i].handle = -1;
 164	}
 165
 166	add->index = 0;
 167	add->count = count;
 168	atomic_set(&add->users, 1);
 169
 170	return add;
 171
 172err:
 173	gntdev_free_map(add);
 174	return NULL;
 175}
 176
 177static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
 178{
 179	struct grant_map *map;
 180
 181	list_for_each_entry(map, &priv->maps, next) {
 182		if (add->index + add->count < map->index) {
 183			list_add_tail(&add->next, &map->next);
 184			goto done;
 185		}
 186		add->index = map->index + map->count;
 187	}
 188	list_add_tail(&add->next, &priv->maps);
 189
 190done:
 191	gntdev_print_maps(priv, "[new]", add->index);
 192}
 193
 194static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 195		int index, int count)
 196{
 197	struct grant_map *map;
 198
 199	list_for_each_entry(map, &priv->maps, next) {
 200		if (map->index != index)
 201			continue;
 202		if (count && map->count != count)
 203			continue;
 204		return map;
 205	}
 206	return NULL;
 207}
 208
 209static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 210{
 211	if (!map)
 212		return;
 213
 214	if (!atomic_dec_and_test(&map->users))
 215		return;
 216
 217	atomic_sub(map->count, &pages_mapped);
 218
 219	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 220		notify_remote_via_evtchn(map->notify.event);
 221		evtchn_put(map->notify.event);
 222	}
 223
 224	if (populate_freeable_maps && priv) {
 225		mutex_lock(&priv->lock);
 226		list_del(&map->next);
 227		mutex_unlock(&priv->lock);
 228	}
 229
 230	if (map->pages && !use_ptemod)
 231		unmap_grant_pages(map, 0, map->count);
 232	gntdev_free_map(map);
 233}
 234
 235/* ------------------------------------------------------------------ */
 236
 237static int find_grant_ptes(pte_t *pte, pgtable_t token,
 238		unsigned long addr, void *data)
 239{
 240	struct grant_map *map = data;
 241	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
 242	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
 243	u64 pte_maddr;
 244
 245	BUG_ON(pgnr >= map->count);
 246	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 247
 248	/*
 249	 * Set the PTE as special to force get_user_pages_fast() fall
 250	 * back to the slow path.  If this is not supported as part of
 251	 * the grant map, it will be done afterwards.
 252	 */
 253	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
 254		flags |= (1 << _GNTMAP_guest_avail0);
 255
 256	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 257			  map->grants[pgnr].ref,
 258			  map->grants[pgnr].domid);
 259	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 260			    -1 /* handle */);
 261	return 0;
 262}
 263
 264#ifdef CONFIG_X86
 265static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
 266				     unsigned long addr, void *data)
 267{
 268	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
 269	return 0;
 270}
 271#endif
 272
 273static int map_grant_pages(struct grant_map *map)
 274{
 275	int i, err = 0;
 276
 277	if (!use_ptemod) {
 278		/* Note: it could already be mapped */
 279		if (map->map_ops[0].handle != -1)
 280			return 0;
 281		for (i = 0; i < map->count; i++) {
 282			unsigned long addr = (unsigned long)
 283				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 284			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
 285				map->grants[i].ref,
 286				map->grants[i].domid);
 287			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
 288				map->flags, -1 /* handle */);
 289		}
 290	} else {
 291		/*
 292		 * Setup the map_ops corresponding to the pte entries pointing
 293		 * to the kernel linear addresses of the struct pages.
 294		 * These ptes are completely different from the user ptes dealt
 295		 * with find_grant_ptes.
 296		 */
 297		for (i = 0; i < map->count; i++) {
 298			unsigned long address = (unsigned long)
 299				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 300			BUG_ON(PageHighMem(map->pages[i]));
 301
 302			gnttab_set_map_op(&map->kmap_ops[i], address,
 303				map->flags | GNTMAP_host_map,
 304				map->grants[i].ref,
 305				map->grants[i].domid);
 306			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
 307				map->flags | GNTMAP_host_map, -1);
 308		}
 309	}
 310
 311	pr_debug("map %d+%d\n", map->index, map->count);
 312	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
 313			map->pages, map->count);
 314	if (err)
 315		return err;
 316
 317	for (i = 0; i < map->count; i++) {
 318		if (map->map_ops[i].status) {
 319			err = -EINVAL;
 320			continue;
 321		}
 322
 323		map->unmap_ops[i].handle = map->map_ops[i].handle;
 324		if (use_ptemod)
 325			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
 326	}
 327	return err;
 328}
 329
 330static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 331{
 332	int i, err = 0;
 333	struct gntab_unmap_queue_data unmap_data;
 334
 335	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 336		int pgno = (map->notify.addr >> PAGE_SHIFT);
 337		if (pgno >= offset && pgno < offset + pages) {
 338			/* No need for kmap, pages are in lowmem */
 339			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 340			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 341			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 342		}
 343	}
 344
 345	unmap_data.unmap_ops = map->unmap_ops + offset;
 346	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
 347	unmap_data.pages = map->pages + offset;
 348	unmap_data.count = pages;
 349
 350	err = gnttab_unmap_refs_sync(&unmap_data);
 351	if (err)
 352		return err;
 353
 354	for (i = 0; i < pages; i++) {
 355		if (map->unmap_ops[offset+i].status)
 356			err = -EINVAL;
 357		pr_debug("unmap handle=%d st=%d\n",
 358			map->unmap_ops[offset+i].handle,
 359			map->unmap_ops[offset+i].status);
 360		map->unmap_ops[offset+i].handle = -1;
 361	}
 362	return err;
 363}
 364
 365static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 366{
 367	int range, err = 0;
 368
 369	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 370
 371	/* It is possible the requested range will have a "hole" where we
 372	 * already unmapped some of the grants. Only unmap valid ranges.
 373	 */
 374	while (pages && !err) {
 375		while (pages && map->unmap_ops[offset].handle == -1) {
 376			offset++;
 377			pages--;
 378		}
 379		range = 0;
 380		while (range < pages) {
 381			if (map->unmap_ops[offset+range].handle == -1) {
 382				range--;
 383				break;
 384			}
 385			range++;
 386		}
 387		err = __unmap_grant_pages(map, offset, range);
 388		offset += range;
 389		pages -= range;
 390	}
 391
 392	return err;
 393}
 394
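/*
 * Editor's note, not in the original file: compare this loop with the
 * v5.9 listing above, which simply breaks on a hole. With the "range--"
 * here, the page just before a hole appears to be left out of the unmap
 * batch, and when a hole directly follows a single mapped page range
 * ends up 0, so offset and pages never advance.
 */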
 395/* ------------------------------------------------------------------ */
 396
 397static void gntdev_vma_open(struct vm_area_struct *vma)
 398{
 399	struct grant_map *map = vma->vm_private_data;
 400
 401	pr_debug("gntdev_vma_open %p\n", vma);
 402	atomic_inc(&map->users);
 403}
 404
 405static void gntdev_vma_close(struct vm_area_struct *vma)
 406{
 407	struct grant_map *map = vma->vm_private_data;
 408	struct file *file = vma->vm_file;
 409	struct gntdev_priv *priv = file->private_data;
 410
 411	pr_debug("gntdev_vma_close %p\n", vma);
 412	if (use_ptemod) {
 413		/* It is possible that an mmu notifier could be running
 414		 * concurrently, so take priv->lock to ensure that the vma won't
 415		 * vanishing during the unmap_grant_pages call, since we will
 416		 * spin here until that completes. Such a concurrent call will
 417		 * not do any unmapping, since that has been done prior to
 418		 * closing the vma, but it may still iterate the unmap_ops list.
 419		 */
 420		mutex_lock(&priv->lock);
 421		map->vma = NULL;
 422		mutex_unlock(&priv->lock);
 423	}
 424	vma->vm_private_data = NULL;
 425	gntdev_put_map(priv, map);
 426}
 427
 428static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 429						 unsigned long addr)
 430{
 431	struct grant_map *map = vma->vm_private_data;
 432
 433	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 434}
 435
 436static const struct vm_operations_struct gntdev_vmops = {
 437	.open = gntdev_vma_open,
 438	.close = gntdev_vma_close,
 439	.find_special_page = gntdev_vma_find_special_page,
 440};
 441
 442/* ------------------------------------------------------------------ */
 443
 444static void unmap_if_in_range(struct grant_map *map,
 445			      unsigned long start, unsigned long end)
 446{
 447	unsigned long mstart, mend;
 448	int err;
 449
 450	if (!map->vma)
 451		return;
 452	if (map->vma->vm_start >= end)
 453		return;
 454	if (map->vma->vm_end <= start)
 455		return;
 456	mstart = max(start, map->vma->vm_start);
 457	mend   = min(end,   map->vma->vm_end);
 458	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 459			map->index, map->count,
 460			map->vma->vm_start, map->vma->vm_end,
 461			start, end, mstart, mend);
 462	err = unmap_grant_pages(map,
 463				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 464				(mend - mstart) >> PAGE_SHIFT);
 465	WARN_ON(err);
 466}
 467
 468static void mn_invl_range_start(struct mmu_notifier *mn,
 469				struct mm_struct *mm,
 470				unsigned long start, unsigned long end)
 471{
 472	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 473	struct grant_map *map;
 474
 475	mutex_lock(&priv->lock);
 476	list_for_each_entry(map, &priv->maps, next) {
 477		unmap_if_in_range(map, start, end);
 478	}
 479	list_for_each_entry(map, &priv->freeable_maps, next) {
 480		unmap_if_in_range(map, start, end);
 481	}
 482	mutex_unlock(&priv->lock);
 483}
 484
 485static void mn_invl_page(struct mmu_notifier *mn,
 486			 struct mm_struct *mm,
 487			 unsigned long address)
 488{
 489	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
 490}
 491
 492static void mn_release(struct mmu_notifier *mn,
 493		       struct mm_struct *mm)
 494{
 495	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 496	struct grant_map *map;
 497	int err;
 498
 499	mutex_lock(&priv->lock);
 500	list_for_each_entry(map, &priv->maps, next) {
 501		if (!map->vma)
 502			continue;
 503		pr_debug("map %d+%d (%lx %lx)\n",
 504				map->index, map->count,
 505				map->vma->vm_start, map->vma->vm_end);
 506		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 507		WARN_ON(err);
 508	}
 509	list_for_each_entry(map, &priv->freeable_maps, next) {
 510		if (!map->vma)
 511			continue;
 512		pr_debug("map %d+%d (%lx %lx)\n",
 513				map->index, map->count,
 514				map->vma->vm_start, map->vma->vm_end);
 515		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 516		WARN_ON(err);
 517	}
 518	mutex_unlock(&priv->lock);
 519}
 520
 521static const struct mmu_notifier_ops gntdev_mmu_ops = {
 522	.release                = mn_release,
 523	.invalidate_page        = mn_invl_page,
 524	.invalidate_range_start = mn_invl_range_start,
 525};
 526
 527/* ------------------------------------------------------------------ */
 528
 529static int gntdev_open(struct inode *inode, struct file *flip)
 530{
 531	struct gntdev_priv *priv;
 532	int ret = 0;
 533
 534	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 535	if (!priv)
 536		return -ENOMEM;
 537
 538	INIT_LIST_HEAD(&priv->maps);
 539	INIT_LIST_HEAD(&priv->freeable_maps);
 540	mutex_init(&priv->lock);
 541
 542	if (use_ptemod) {
 543		priv->mm = get_task_mm(current);
 544		if (!priv->mm) {
 545			kfree(priv);
 546			return -ENOMEM;
 547		}
 548		priv->mn.ops = &gntdev_mmu_ops;
 549		ret = mmu_notifier_register(&priv->mn, priv->mm);
 550		mmput(priv->mm);
 551	}
 552
 553	if (ret) {
 554		kfree(priv);
 555		return ret;
 556	}
 557
 558	flip->private_data = priv;
 559	pr_debug("priv %p\n", priv);
 560
 561	return 0;
 562}
 563
 564static int gntdev_release(struct inode *inode, struct file *flip)
 565{
 566	struct gntdev_priv *priv = flip->private_data;
 567	struct grant_map *map;
 568
 569	pr_debug("priv %p\n", priv);
 570
 571	mutex_lock(&priv->lock);
 572	while (!list_empty(&priv->maps)) {
 573		map = list_entry(priv->maps.next, struct grant_map, next);
 574		list_del(&map->next);
 575		gntdev_put_map(NULL /* already removed */, map);
 576	}
 577	WARN_ON(!list_empty(&priv->freeable_maps));
 578	mutex_unlock(&priv->lock);
 579
 580	if (use_ptemod)
 581		mmu_notifier_unregister(&priv->mn, priv->mm);
 582	kfree(priv);
 583	return 0;
 584}
 585
 586static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 587				       struct ioctl_gntdev_map_grant_ref __user *u)
 588{
 589	struct ioctl_gntdev_map_grant_ref op;
 590	struct grant_map *map;
 591	int err;
 592
 593	if (copy_from_user(&op, u, sizeof(op)) != 0)
 594		return -EFAULT;
 595	pr_debug("priv %p, add %d\n", priv, op.count);
 596	if (unlikely(op.count <= 0))
 597		return -EINVAL;
 598
 599	err = -ENOMEM;
 600	map = gntdev_alloc_map(priv, op.count);
 601	if (!map)
 602		return err;
 603
 604	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
 605		pr_debug("can't map: over limit\n");
 606		gntdev_put_map(NULL, map);
 607		return err;
 608	}
 609
 610	if (copy_from_user(map->grants, &u->refs,
 611			   sizeof(map->grants[0]) * op.count) != 0) {
 612		gntdev_put_map(NULL, map);
 613		return -EFAULT;
 614	}
 615
 616	mutex_lock(&priv->lock);
 617	gntdev_add_map(priv, map);
 618	op.index = map->index << PAGE_SHIFT;
 619	mutex_unlock(&priv->lock);
 620
 621	if (copy_to_user(u, &op, sizeof(op)) != 0)
 622		return -EFAULT;
 623
 624	return 0;
 625}
 626
 627static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 628					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 629{
 630	struct ioctl_gntdev_unmap_grant_ref op;
 631	struct grant_map *map;
 632	int err = -ENOENT;
 633
 634	if (copy_from_user(&op, u, sizeof(op)) != 0)
 635		return -EFAULT;
 636	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 637
 638	mutex_lock(&priv->lock);
 639	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 640	if (map) {
 641		list_del(&map->next);
 642		if (populate_freeable_maps)
 643			list_add_tail(&map->next, &priv->freeable_maps);
 644		err = 0;
 645	}
 646	mutex_unlock(&priv->lock);
 647	if (map)
 648		gntdev_put_map(priv, map);
 649	return err;
 650}
 651
 652static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 653					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 654{
 655	struct ioctl_gntdev_get_offset_for_vaddr op;
 656	struct vm_area_struct *vma;
 657	struct grant_map *map;
 658	int rv = -EINVAL;
 659
 660	if (copy_from_user(&op, u, sizeof(op)) != 0)
 661		return -EFAULT;
 662	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 663
 664	down_read(&current->mm->mmap_sem);
 665	vma = find_vma(current->mm, op.vaddr);
 666	if (!vma || vma->vm_ops != &gntdev_vmops)
 667		goto out_unlock;
 668
 669	map = vma->vm_private_data;
 670	if (!map)
 671		goto out_unlock;
 672
 673	op.offset = map->index << PAGE_SHIFT;
 674	op.count = map->count;
 675	rv = 0;
 676
 677 out_unlock:
 678	up_read(&current->mm->mmap_sem);
 679
 680	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 681		return -EFAULT;
 682	return rv;
 683}
 684
 685static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 686{
 687	struct ioctl_gntdev_unmap_notify op;
 688	struct grant_map *map;
 689	int rc;
 690	int out_flags;
 691	unsigned int out_event;
 692
 693	if (copy_from_user(&op, u, sizeof(op)))
 694		return -EFAULT;
 695
 696	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
 697		return -EINVAL;
 698
 699	/* We need to grab a reference to the event channel we are going to use
 700	 * to send the notify before releasing the reference we may already have
 701	 * (if someone has called this ioctl twice). This is required so that
 702	 * it is possible to change the clear_byte part of the notification
 703	 * without disturbing the event channel part, which may now be the last
 704	 * reference to that event channel.
 705	 */
 706	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
 707		if (evtchn_get(op.event_channel_port))
 708			return -EINVAL;
 709	}
 710
 711	out_flags = op.action;
 712	out_event = op.event_channel_port;
 713
 714	mutex_lock(&priv->lock);
 715
 716	list_for_each_entry(map, &priv->maps, next) {
 717		uint64_t begin = map->index << PAGE_SHIFT;
 718		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
 719		if (op.index >= begin && op.index < end)
 720			goto found;
 721	}
 722	rc = -ENOENT;
 723	goto unlock_out;
 724
 725 found:
 726	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
 727			(map->flags & GNTMAP_readonly)) {
 728		rc = -EINVAL;
 729		goto unlock_out;
 730	}
 731
 732	out_flags = map->notify.flags;
 733	out_event = map->notify.event;
 734
 735	map->notify.flags = op.action;
 736	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
 737	map->notify.event = op.event_channel_port;
 738
 739	rc = 0;
 740
 741 unlock_out:
 742	mutex_unlock(&priv->lock);
 743
 744	/* Drop the reference to the event channel we did not save in the map */
 745	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
 746		evtchn_put(out_event);
 747
 748	return rc;
 749}
 750
 751#define GNTDEV_COPY_BATCH 24
 752
 753struct gntdev_copy_batch {
 754	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
 755	struct page *pages[GNTDEV_COPY_BATCH];
 756	s16 __user *status[GNTDEV_COPY_BATCH];
 757	unsigned int nr_ops;
 758	unsigned int nr_pages;
 759};
 760
 761static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 762			   bool writeable, unsigned long *gfn)
 763{
 764	unsigned long addr = (unsigned long)virt;
 765	struct page *page;
 766	unsigned long xen_pfn;
 767	int ret;
 768
 769	ret = get_user_pages_fast(addr, 1, writeable, &page);
 770	if (ret < 0)
 771		return ret;
 772
 773	batch->pages[batch->nr_pages++] = page;
 774
 775	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
 776	*gfn = pfn_to_gfn(xen_pfn);
 777
 778	return 0;
 779}
 780
 781static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 782{
 783	unsigned int i;
 784
 785	for (i = 0; i < batch->nr_pages; i++)
 786		put_page(batch->pages[i]);
 787	batch->nr_pages = 0;
 788}
 789
static int gntdev_copy(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	gnttab_batch_copy(batch->ops, batch->nr_ops);
	gntdev_put_pages(batch);

	/*
	 * For each completed op, update the status if the op failed
	 * and all previous ops for the segment were successful.
	 */
	for (i = 0; i < batch->nr_ops; i++) {
		s16 status = batch->ops[i].status;
		s16 old_status;

		if (status == GNTST_okay)
			continue;

		if (__get_user(old_status, batch->status[i]))
			return -EFAULT;

		if (old_status != GNTST_okay)
			continue;

		if (__put_user(status, batch->status[i]))
			return -EFAULT;
	}

	batch->nr_ops = 0;
	return 0;
}

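/*
 * Queue the ops for a single segment, splitting it at Xen page
 * boundaries on the local side: a gnttab_copy op cannot cross a
 * XEN_PAGE_SIZE boundary in either source or destination, so a long
 * virt<->gref segment becomes several ops that share one status slot.
 */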
static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
				 struct gntdev_grant_copy_segment *seg,
				 s16 __user *status)
{
	uint16_t copied = 0;

	/*
	 * Disallow local -> local copies since there is only space in
	 * batch->pages for one page per-op and this would be a very
	 * expensive memcpy().
	 */
	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
		return -EINVAL;

	/* Can't cross page if source/dest is a grant ref. */
	if (seg->flags & GNTCOPY_source_gref) {
		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}
	if (seg->flags & GNTCOPY_dest_gref) {
		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}

	if (put_user(GNTST_okay, status))
		return -EFAULT;

	while (copied < seg->len) {
		struct gnttab_copy *op;
		void __user *virt;
		size_t len, off;
		unsigned long gfn;
		int ret;

		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
			ret = gntdev_copy(batch);
			if (ret < 0)
				return ret;
		}

		len = seg->len - copied;

		op = &batch->ops[batch->nr_ops];
		op->flags = 0;

		if (seg->flags & GNTCOPY_source_gref) {
			op->source.u.ref = seg->source.foreign.ref;
			op->source.domid = seg->source.foreign.domid;
			op->source.offset = seg->source.foreign.offset + copied;
			op->flags |= GNTCOPY_source_gref;
		} else {
			virt = seg->source.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, false, &gfn);
			if (ret < 0)
				return ret;

			op->source.u.gmfn = gfn;
			op->source.domid = DOMID_SELF;
			op->source.offset = off;
		}

		if (seg->flags & GNTCOPY_dest_gref) {
			op->dest.u.ref = seg->dest.foreign.ref;
			op->dest.domid = seg->dest.foreign.domid;
			op->dest.offset = seg->dest.foreign.offset + copied;
			op->flags |= GNTCOPY_dest_gref;
		} else {
			virt = seg->dest.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, true, &gfn);
			if (ret < 0)
				return ret;

			op->dest.u.gmfn = gfn;
			op->dest.domid = DOMID_SELF;
			op->dest.offset = off;
		}

		op->len = len;
		copied += len;

		batch->status[batch->nr_ops] = status;
		batch->nr_ops++;
	}

	return 0;
}

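/*
 * IOCTL_GNTDEV_GRANT_COPY: copy data to/from granted pages without
 * mapping them.  A minimal, hypothetical userspace sketch that copies
 * one Xen page from a foreign grant into a local buffer (fd, domid and
 * gref assumed to exist already; error handling trimmed):
 *
 *	struct gntdev_grant_copy_segment seg = {
 *		.flags = GNTCOPY_source_gref,
 *		.source.foreign = { .ref = gref, .domid = domid, .offset = 0 },
 *		.dest.virt = buf,
 *		.len = 4096,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = { .count = 1, .segments = &seg };
 *
 *	if (ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy) || seg.status != GNTST_okay)
 *		perror("IOCTL_GNTDEV_GRANT_COPY");
 */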
static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	batch.nr_ops = 0;
	batch.nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			goto out;
		}

		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
		if (ret < 0)
			goto out;

		cond_resched();
	}
	if (batch.nr_ops)
		ret = gntdev_copy(&batch);
	return ret;

 out:
	gntdev_put_pages(&batch);
	return ret;
}

static long gntdev_ioctl(struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = filp->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	case IOCTL_GNTDEV_GRANT_COPY:
		return gntdev_ioctl_grant_copy(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

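/*
 * mmap() backend: vma->vm_pgoff selects a range set up earlier by
 * IOCTL_GNTDEV_MAP_GRANT_REF.  Two strategies exist: with use_ptemod
 * (PV domains) the grant-map hypercall installs the mapping by
 * rewriting the VMA's PTEs, while auto-translated domains simply
 * insert the already granted pages with vm_insert_page().
 */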
static int gntdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = filp->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct gntdev_grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		pr_warn("Huh? Other mm?\n");
		goto unlock_out;
	}

	refcount_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	mutex_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	} else {
#ifdef CONFIG_X86
		/*
		 * If the PTEs were not made special by the grant map
		 * hypercall, do so here.
		 *
		 * This is racy since the mapping is already visible
		 * to userspace but userspace should be well-behaved
		 * enough to not touch it until the mmap() call
		 * returns.
		 */
		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
			apply_to_page_range(vma->vm_mm, vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    set_grant_ptes_as_special, NULL);
		}
#endif
		map->pages_vm_start = vma->vm_start;
	}

	return 0;

unlock_out:
	mutex_unlock(&priv->lock);
	return err;

out_unlock_put:
	mutex_unlock(&priv->lock);
out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(priv, map);
	return err;
}

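/*
 * For reference, the overall flow a well-behaved client follows is,
 * schematically (fd, domid and gref are hypothetical; error handling
 * omitted):
 *
 *	struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
 *	op.refs[0].domid = domid;
 *	op.refs[0].ref = gref;
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, op.index);
 *	... use p ...
 *	munmap(p, 4096);
 *
 *	struct ioctl_gntdev_unmap_grant_ref uop = { .index = op.index,
 *						    .count = 1 };
 *	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &uop);
 */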
static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

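/* Registered from gntdev_init(); with devtmpfs/udev this shows up as /dev/xen/gntdev. */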
static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

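	/*
	 * PV domains need the grant-map hypercall to rewrite PTEs
	 * (use_ptemod); auto-translated domains can treat granted
	 * pages like ordinary RAM and map them directly.
	 */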
	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */