v4.17
   1/******************************************************************************
   2 * gntdev.c
   3 *
   4 * Device for accessing (in user-space) pages that have been granted by other
   5 * domains.
   6 *
   7 * Copyright (c) 2006-2007, D G Murray.
   8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#undef DEBUG
  21
  22#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  23
  24#include <linux/module.h>
  25#include <linux/kernel.h>
  26#include <linux/init.h>
  27#include <linux/miscdevice.h>
  28#include <linux/fs.h>
  29#include <linux/mm.h>
  30#include <linux/mman.h>
  31#include <linux/mmu_notifier.h>
  32#include <linux/types.h>
  33#include <linux/uaccess.h>
  34#include <linux/sched.h>
  35#include <linux/sched/mm.h>
  36#include <linux/spinlock.h>
  37#include <linux/slab.h>
  38#include <linux/highmem.h>
  39#include <linux/refcount.h>
  40
  41#include <xen/xen.h>
  42#include <xen/grant_table.h>
  43#include <xen/balloon.h>
  44#include <xen/gntdev.h>
  45#include <xen/events.h>
  46#include <xen/page.h>
  47#include <asm/xen/hypervisor.h>
  48#include <asm/xen/hypercall.h>
  49
  50MODULE_LICENSE("GPL");
  51MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
  52	      "Gerd Hoffmann <kraxel@redhat.com>");
  53MODULE_DESCRIPTION("User-space granted page access driver");
  54
  55static int limit = 1024*1024;
  56module_param(limit, int, 0644);
  57MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
  58		"the gntdev device");
  59
  60static atomic_t pages_mapped = ATOMIC_INIT(0);
  61
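/*
 * use_ptemod is true for PV guests (no auto-translated physmap, see
 * gntdev_init()): grants are then mapped by rewriting the user page
 * table entries in place (find_grant_ptes) instead of inserting
 * normal pages into the VMA.
 */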
  62static int use_ptemod;
  63#define populate_freeable_maps use_ptemod
  64
  65struct gntdev_priv {
  66	/* maps with visible offsets in the file descriptor */
  67	struct list_head maps;
  68	/* maps that are not visible; will be freed on munmap.
  69	 * Only populated if populate_freeable_maps == 1 */
  70	struct list_head freeable_maps;
  71	/* lock protects maps and freeable_maps */
  72	struct mutex lock;
  73	struct mm_struct *mm;
  74	struct mmu_notifier mn;
  75};
  76
  77struct unmap_notify {
  78	int flags;
  79	/* Address relative to the start of the grant_map */
  80	int addr;
  81	int event;
  82};
  83
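/*
 * The arrays below hold one entry per granted page and are indexed by
 * page number within the mapping: map_ops/unmap_ops describe the
 * user-visible mapping, while kmap_ops/kunmap_ops (used only when
 * use_ptemod is set) describe the kernel's own mapping of the same
 * pages.
 */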
  84struct grant_map {
  85	struct list_head next;
  86	struct vm_area_struct *vma;
  87	int index;
  88	int count;
  89	int flags;
  90	refcount_t users;
  91	struct unmap_notify notify;
  92	struct ioctl_gntdev_grant_ref *grants;
  93	struct gnttab_map_grant_ref   *map_ops;
  94	struct gnttab_unmap_grant_ref *unmap_ops;
  95	struct gnttab_map_grant_ref   *kmap_ops;
  96	struct gnttab_unmap_grant_ref *kunmap_ops;
  97	struct page **pages;
  98	unsigned long pages_vm_start;
  99};
 100
 101static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
 102
 103/* ------------------------------------------------------------------ */
 104
 105static void gntdev_print_maps(struct gntdev_priv *priv,
 106			      char *text, int text_index)
 107{
 108#ifdef DEBUG
 109	struct grant_map *map;
 110
 111	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
 112	list_for_each_entry(map, &priv->maps, next)
 113		pr_debug("  index %2d, count %2d %s\n",
 114		       map->index, map->count,
 115		       map->index == text_index && text ? text : "");
 116#endif
 117}
 118
 119static void gntdev_free_map(struct grant_map *map)
 120{
 121	if (map == NULL)
 122		return;
 123
 124	if (map->pages)
 125		gnttab_free_pages(map->count, map->pages);
 126	kfree(map->pages);
 127	kfree(map->grants);
 128	kfree(map->map_ops);
 129	kfree(map->unmap_ops);
 130	kfree(map->kmap_ops);
 131	kfree(map->kunmap_ops);
 132	kfree(map);
 133}
 134
 135static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 136{
 137	struct grant_map *add;
 138	int i;
 139
 140	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
 141	if (NULL == add)
 142		return NULL;
 143
 144	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
 145	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
 146	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
 147	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
 148	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
 149	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 150	if (NULL == add->grants    ||
 151	    NULL == add->map_ops   ||
 152	    NULL == add->unmap_ops ||
 153	    NULL == add->kmap_ops  ||
 154	    NULL == add->kunmap_ops ||
 155	    NULL == add->pages)
 156		goto err;
 157
 158	if (gnttab_alloc_pages(count, add->pages))
 159		goto err;
 160
 161	for (i = 0; i < count; i++) {
 162		add->map_ops[i].handle = -1;
 163		add->unmap_ops[i].handle = -1;
 164		add->kmap_ops[i].handle = -1;
 165		add->kunmap_ops[i].handle = -1;
 166	}
 167
 168	add->index = 0;
 169	add->count = count;
 170	refcount_set(&add->users, 1);
 171
 172	return add;
 173
 174err:
 175	gntdev_free_map(add);
 176	return NULL;
 177}
 178
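/*
 * Insert @add into priv->maps, which is kept sorted by index:
 * add->index is pushed past each existing map until the range
 * [index, index + count) fits, so the new map takes the first free
 * slot in the pseudo file offset space later resolved by mmap().
 */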
 179static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
 180{
 181	struct grant_map *map;
 182
 183	list_for_each_entry(map, &priv->maps, next) {
 184		if (add->index + add->count < map->index) {
 185			list_add_tail(&add->next, &map->next);
 186			goto done;
 187		}
 188		add->index = map->index + map->count;
 189	}
 190	list_add_tail(&add->next, &priv->maps);
 191
 192done:
 193	gntdev_print_maps(priv, "[new]", add->index);
 194}
 195
 196static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 197		int index, int count)
 198{
 199	struct grant_map *map;
 200
 201	list_for_each_entry(map, &priv->maps, next) {
 202		if (map->index != index)
 203			continue;
 204		if (count && map->count != count)
 205			continue;
 206		return map;
 207	}
 208	return NULL;
 209}
 210
 211static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 212{
 213	if (!map)
 214		return;
 215
 216	if (!refcount_dec_and_test(&map->users))
 217		return;
 218
 219	atomic_sub(map->count, &pages_mapped);
 220
 221	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 222		notify_remote_via_evtchn(map->notify.event);
 223		evtchn_put(map->notify.event);
 224	}
 225
 226	if (populate_freeable_maps && priv) {
 227		mutex_lock(&priv->lock);
 228		list_del(&map->next);
 229		mutex_unlock(&priv->lock);
 230	}
 231
 232	if (map->pages && !use_ptemod)
 233		unmap_grant_pages(map, 0, map->count);
 234	gntdev_free_map(map);
 235}
 236
 237/* ------------------------------------------------------------------ */
 238
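/*
 * Called via apply_to_page_range() for each pte of the VMA. With
 * GNTMAP_contains_pte, the address handed to the hypervisor is the
 * machine address of the pte itself, so Xen writes the grant mapping
 * straight into the page table entry.
 */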
 239static int find_grant_ptes(pte_t *pte, pgtable_t token,
 240		unsigned long addr, void *data)
 241{
 242	struct grant_map *map = data;
 243	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
 244	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
 245	u64 pte_maddr;
 246
 247	BUG_ON(pgnr >= map->count);
 248	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 249
 250	/*
  251	 * Set the PTE as special to force get_user_pages_fast() to fall
  252	 * back to the slow path.  If this is not supported as part of
 253	 * the grant map, it will be done afterwards.
 254	 */
 255	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
 256		flags |= (1 << _GNTMAP_guest_avail0);
 257
 258	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 259			  map->grants[pgnr].ref,
 260			  map->grants[pgnr].domid);
 261	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 262			    -1 /* handle */);
 263	return 0;
 264}
 265
 266#ifdef CONFIG_X86
 267static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
 268				     unsigned long addr, void *data)
 269{
 270	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
 271	return 0;
 272}
 273#endif
 274
 275static int map_grant_pages(struct grant_map *map)
 276{
 277	int i, err = 0;
 278
 279	if (!use_ptemod) {
 280		/* Note: it could already be mapped */
 281		if (map->map_ops[0].handle != -1)
 282			return 0;
 283		for (i = 0; i < map->count; i++) {
 284			unsigned long addr = (unsigned long)
 285				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 286			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
 287				map->grants[i].ref,
 288				map->grants[i].domid);
 289			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
 290				map->flags, -1 /* handle */);
 291		}
 292	} else {
 293		/*
  294		 * Set up the map_ops corresponding to the pte entries pointing
  295		 * to the kernel linear addresses of the struct pages.
  296		 * These ptes are completely different from the user ptes dealt
  297		 * with by find_grant_ptes.
 298		 */
 299		for (i = 0; i < map->count; i++) {
 300			unsigned long address = (unsigned long)
 301				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 302			BUG_ON(PageHighMem(map->pages[i]));
 303
 304			gnttab_set_map_op(&map->kmap_ops[i], address,
 305				map->flags | GNTMAP_host_map,
 306				map->grants[i].ref,
 307				map->grants[i].domid);
 308			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
 309				map->flags | GNTMAP_host_map, -1);
 310		}
 311	}
 312
 313	pr_debug("map %d+%d\n", map->index, map->count);
 314	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
 315			map->pages, map->count);
 316	if (err)
 317		return err;
 318
 319	for (i = 0; i < map->count; i++) {
 320		if (map->map_ops[i].status) {
 321			err = -EINVAL;
 322			continue;
 323		}
 324
 325		map->unmap_ops[i].handle = map->map_ops[i].handle;
 326		if (use_ptemod)
 327			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
 328	}
 329	return err;
 330}
 331
 332static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 333{
 334	int i, err = 0;
 335	struct gntab_unmap_queue_data unmap_data;
 336
 337	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 338		int pgno = (map->notify.addr >> PAGE_SHIFT);
 339		if (pgno >= offset && pgno < offset + pages) {
 340			/* No need for kmap, pages are in lowmem */
 341			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 342			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 343			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 344		}
 345	}
 346
 347	unmap_data.unmap_ops = map->unmap_ops + offset;
 348	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
 349	unmap_data.pages = map->pages + offset;
 350	unmap_data.count = pages;
 351
 352	err = gnttab_unmap_refs_sync(&unmap_data);
 353	if (err)
 354		return err;
 355
 356	for (i = 0; i < pages; i++) {
 357		if (map->unmap_ops[offset+i].status)
 358			err = -EINVAL;
 359		pr_debug("unmap handle=%d st=%d\n",
 360			map->unmap_ops[offset+i].handle,
 361			map->unmap_ops[offset+i].status);
 362		map->unmap_ops[offset+i].handle = -1;
 363	}
 364	return err;
 365}
 366
 367static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 368{
 369	int range, err = 0;
 370
 371	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 372
 373	/* It is possible the requested range will have a "hole" where we
 374	 * already unmapped some of the grants. Only unmap valid ranges.
 375	 */
 376	while (pages && !err) {
 377		while (pages && map->unmap_ops[offset].handle == -1) {
 378			offset++;
 379			pages--;
 380		}
 381		range = 0;
 382		while (range < pages) {
 383			if (map->unmap_ops[offset+range].handle == -1)
 384				break;
 385			range++;
 386		}
 387		err = __unmap_grant_pages(map, offset, range);
 388		offset += range;
 389		pages -= range;
 390	}
 391
 392	return err;
 393}
 394
 395/* ------------------------------------------------------------------ */
 396
 397static void gntdev_vma_open(struct vm_area_struct *vma)
 398{
 399	struct grant_map *map = vma->vm_private_data;
 400
 401	pr_debug("gntdev_vma_open %p\n", vma);
 402	refcount_inc(&map->users);
 403}
 404
 405static void gntdev_vma_close(struct vm_area_struct *vma)
 406{
 407	struct grant_map *map = vma->vm_private_data;
 408	struct file *file = vma->vm_file;
 409	struct gntdev_priv *priv = file->private_data;
 410
 411	pr_debug("gntdev_vma_close %p\n", vma);
 412	if (use_ptemod) {
 413		/* It is possible that an mmu notifier could be running
  414		 * concurrently, so take priv->lock to ensure that the vma won't
  415		 * vanish during the unmap_grant_pages call, since we will
 416		 * spin here until that completes. Such a concurrent call will
 417		 * not do any unmapping, since that has been done prior to
 418		 * closing the vma, but it may still iterate the unmap_ops list.
 419		 */
 420		mutex_lock(&priv->lock);
 421		map->vma = NULL;
 422		mutex_unlock(&priv->lock);
 423	}
 424	vma->vm_private_data = NULL;
 425	gntdev_put_map(priv, map);
 426}
 427
 428static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 429						 unsigned long addr)
 430{
 431	struct grant_map *map = vma->vm_private_data;
 432
 433	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 434}
 435
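/*
 * find_special_page() lets the core mm translate the special ptes set
 * up for this VMA (see the comment in find_grant_ptes()) back to the
 * struct page backing them, e.g. for get_user_pages().
 */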
 436static const struct vm_operations_struct gntdev_vmops = {
 437	.open = gntdev_vma_open,
 438	.close = gntdev_vma_close,
 439	.find_special_page = gntdev_vma_find_special_page,
 440};
 441
 442/* ------------------------------------------------------------------ */
 443
 444static void unmap_if_in_range(struct grant_map *map,
 445			      unsigned long start, unsigned long end)
 446{
 447	unsigned long mstart, mend;
 448	int err;
 449
 450	if (!map->vma)
 451		return;
 452	if (map->vma->vm_start >= end)
 453		return;
 454	if (map->vma->vm_end <= start)
 455		return;
 456	mstart = max(start, map->vma->vm_start);
 457	mend   = min(end,   map->vma->vm_end);
 458	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 459			map->index, map->count,
 460			map->vma->vm_start, map->vma->vm_end,
 461			start, end, mstart, mend);
 462	err = unmap_grant_pages(map,
 463				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 464				(mend - mstart) >> PAGE_SHIFT);
 465	WARN_ON(err);
 466}
 467
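/*
 * mmu notifier hook: if an invalidated address range overlaps one of
 * our VMAs (e.g. on munmap() or at process exit), the grants covering
 * that range must be unmapped first, because the underlying pages
 * belong to another domain and cannot be reused while still granted.
 */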
 468static void mn_invl_range_start(struct mmu_notifier *mn,
 469				struct mm_struct *mm,
 470				unsigned long start, unsigned long end)
 471{
 472	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 473	struct grant_map *map;
 474
 475	mutex_lock(&priv->lock);
 476	list_for_each_entry(map, &priv->maps, next) {
 477		unmap_if_in_range(map, start, end);
 478	}
 479	list_for_each_entry(map, &priv->freeable_maps, next) {
 480		unmap_if_in_range(map, start, end);
 481	}
 482	mutex_unlock(&priv->lock);
 483}
 484
 485static void mn_release(struct mmu_notifier *mn,
 486		       struct mm_struct *mm)
 487{
 488	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 489	struct grant_map *map;
 490	int err;
 491
 492	mutex_lock(&priv->lock);
 493	list_for_each_entry(map, &priv->maps, next) {
 494		if (!map->vma)
 495			continue;
 496		pr_debug("map %d+%d (%lx %lx)\n",
 497				map->index, map->count,
 498				map->vma->vm_start, map->vma->vm_end);
 499		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 500		WARN_ON(err);
 501	}
 502	list_for_each_entry(map, &priv->freeable_maps, next) {
 503		if (!map->vma)
 504			continue;
 505		pr_debug("map %d+%d (%lx %lx)\n",
 506				map->index, map->count,
 507				map->vma->vm_start, map->vma->vm_end);
 508		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 509		WARN_ON(err);
 510	}
 511	mutex_unlock(&priv->lock);
 512}
 513
 514static const struct mmu_notifier_ops gntdev_mmu_ops = {
 515	.release                = mn_release,
 516	.invalidate_range_start = mn_invl_range_start,
 517};
 518
 519/* ------------------------------------------------------------------ */
 520
 521static int gntdev_open(struct inode *inode, struct file *flip)
 522{
 523	struct gntdev_priv *priv;
 524	int ret = 0;
 525
 526	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 527	if (!priv)
 528		return -ENOMEM;
 529
 530	INIT_LIST_HEAD(&priv->maps);
 531	INIT_LIST_HEAD(&priv->freeable_maps);
 532	mutex_init(&priv->lock);
 533
 534	if (use_ptemod) {
 535		priv->mm = get_task_mm(current);
 536		if (!priv->mm) {
 537			kfree(priv);
 538			return -ENOMEM;
 539		}
 540		priv->mn.ops = &gntdev_mmu_ops;
 541		ret = mmu_notifier_register(&priv->mn, priv->mm);
 542		mmput(priv->mm);
 543	}
 544
 545	if (ret) {
 546		kfree(priv);
 547		return ret;
 548	}
 549
 550	flip->private_data = priv;
 551	pr_debug("priv %p\n", priv);
 552
 553	return 0;
 554}
 555
 556static int gntdev_release(struct inode *inode, struct file *flip)
 557{
 558	struct gntdev_priv *priv = flip->private_data;
 559	struct grant_map *map;
 560
 561	pr_debug("priv %p\n", priv);
 562
 563	mutex_lock(&priv->lock);
 564	while (!list_empty(&priv->maps)) {
 565		map = list_entry(priv->maps.next, struct grant_map, next);
 566		list_del(&map->next);
 567		gntdev_put_map(NULL /* already removed */, map);
 568	}
 569	WARN_ON(!list_empty(&priv->freeable_maps));
 570	mutex_unlock(&priv->lock);
 571
 572	if (use_ptemod)
 573		mmu_notifier_unregister(&priv->mn, priv->mm);
 574	kfree(priv);
 575	return 0;
 576}
 577
 578static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 579				       struct ioctl_gntdev_map_grant_ref __user *u)
 580{
 581	struct ioctl_gntdev_map_grant_ref op;
 582	struct grant_map *map;
 583	int err;
 584
 585	if (copy_from_user(&op, u, sizeof(op)) != 0)
 586		return -EFAULT;
 587	pr_debug("priv %p, add %d\n", priv, op.count);
 588	if (unlikely(op.count <= 0))
 589		return -EINVAL;
 590
 591	err = -ENOMEM;
 592	map = gntdev_alloc_map(priv, op.count);
 593	if (!map)
 594		return err;
 595
 596	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
 597		pr_debug("can't map: over limit\n");
 598		gntdev_put_map(NULL, map);
 599		return err;
 600	}
 601
 602	if (copy_from_user(map->grants, &u->refs,
 603			   sizeof(map->grants[0]) * op.count) != 0) {
 604		gntdev_put_map(NULL, map);
 605		return -EFAULT;
 606	}
 607
 608	mutex_lock(&priv->lock);
 609	gntdev_add_map(priv, map);
 610	op.index = map->index << PAGE_SHIFT;
 611	mutex_unlock(&priv->lock);
 612
 613	if (copy_to_user(u, &op, sizeof(op)) != 0)
 614		return -EFAULT;
 615
 616	return 0;
 617}
 618
 619static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 620					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 621{
 622	struct ioctl_gntdev_unmap_grant_ref op;
 623	struct grant_map *map;
 624	int err = -ENOENT;
 625
 626	if (copy_from_user(&op, u, sizeof(op)) != 0)
 627		return -EFAULT;
 628	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 629
 630	mutex_lock(&priv->lock);
 631	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 632	if (map) {
 633		list_del(&map->next);
 634		if (populate_freeable_maps)
 635			list_add_tail(&map->next, &priv->freeable_maps);
 636		err = 0;
 637	}
 638	mutex_unlock(&priv->lock);
 639	if (map)
 640		gntdev_put_map(priv, map);
 641	return err;
 642}
 643
 644static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 645					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 646{
 647	struct ioctl_gntdev_get_offset_for_vaddr op;
 648	struct vm_area_struct *vma;
 649	struct grant_map *map;
 650	int rv = -EINVAL;
 651
 652	if (copy_from_user(&op, u, sizeof(op)) != 0)
 653		return -EFAULT;
 654	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 655
 656	down_read(&current->mm->mmap_sem);
 657	vma = find_vma(current->mm, op.vaddr);
 658	if (!vma || vma->vm_ops != &gntdev_vmops)
 659		goto out_unlock;
 660
 661	map = vma->vm_private_data;
 662	if (!map)
 663		goto out_unlock;
 664
 665	op.offset = map->index << PAGE_SHIFT;
 666	op.count = map->count;
 667	rv = 0;
 668
 669 out_unlock:
 670	up_read(&current->mm->mmap_sem);
 671
 672	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 673		return -EFAULT;
 674	return rv;
 675}
 676
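/*
 * IOCTL_GNTDEV_SET_UNMAP_NOTIFY: arrange for an action when the
 * mapping is torn down, either clearing a byte inside it
 * (UNMAP_NOTIFY_CLEAR_BYTE, see __unmap_grant_pages()) or signalling
 * an event channel (UNMAP_NOTIFY_SEND_EVENT, see gntdev_put_map()),
 * so the granting domain can tell when the mapping has gone away.
 */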
 677static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 678{
 679	struct ioctl_gntdev_unmap_notify op;
 680	struct grant_map *map;
 681	int rc;
 682	int out_flags;
 683	unsigned int out_event;
 684
 685	if (copy_from_user(&op, u, sizeof(op)))
 686		return -EFAULT;
 687
 688	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
 689		return -EINVAL;
 690
 691	/* We need to grab a reference to the event channel we are going to use
 692	 * to send the notify before releasing the reference we may already have
 693	 * (if someone has called this ioctl twice). This is required so that
 694	 * it is possible to change the clear_byte part of the notification
 695	 * without disturbing the event channel part, which may now be the last
 696	 * reference to that event channel.
 697	 */
 698	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
 699		if (evtchn_get(op.event_channel_port))
 700			return -EINVAL;
 701	}
 702
 703	out_flags = op.action;
 704	out_event = op.event_channel_port;
 705
 706	mutex_lock(&priv->lock);
 707
 708	list_for_each_entry(map, &priv->maps, next) {
 709		uint64_t begin = map->index << PAGE_SHIFT;
 710		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
 711		if (op.index >= begin && op.index < end)
 712			goto found;
 713	}
 714	rc = -ENOENT;
 715	goto unlock_out;
 716
 717 found:
 718	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
 719			(map->flags & GNTMAP_readonly)) {
 720		rc = -EINVAL;
 721		goto unlock_out;
 722	}
 723
 724	out_flags = map->notify.flags;
 725	out_event = map->notify.event;
 726
 727	map->notify.flags = op.action;
 728	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
 729	map->notify.event = op.event_channel_port;
 730
 731	rc = 0;
 732
 733 unlock_out:
 734	mutex_unlock(&priv->lock);
 735
 736	/* Drop the reference to the event channel we did not save in the map */
 737	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
 738		evtchn_put(out_event);
 739
 740	return rc;
 741}
 742
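/*
 * IOCTL_GNTDEV_GRANT_COPY support: user-supplied segments are turned
 * into gnttab_copy operations and issued to the hypervisor in batches
 * of GNTDEV_COPY_BATCH, with any user pages touched by a batch pinned
 * via get_user_pages_fast() until the batch completes.
 */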
 743#define GNTDEV_COPY_BATCH 16
 744
 745struct gntdev_copy_batch {
 746	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
 747	struct page *pages[GNTDEV_COPY_BATCH];
 748	s16 __user *status[GNTDEV_COPY_BATCH];
 749	unsigned int nr_ops;
 750	unsigned int nr_pages;
 751};
 752
 753static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 754			   bool writeable, unsigned long *gfn)
 755{
 756	unsigned long addr = (unsigned long)virt;
 757	struct page *page;
 758	unsigned long xen_pfn;
 759	int ret;
 760
 761	ret = get_user_pages_fast(addr, 1, writeable, &page);
 762	if (ret < 0)
 763		return ret;
 764
 765	batch->pages[batch->nr_pages++] = page;
 766
 767	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
 768	*gfn = pfn_to_gfn(xen_pfn);
 769
 770	return 0;
 771}
 772
 773static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 774{
 775	unsigned int i;
 776
 777	for (i = 0; i < batch->nr_pages; i++)
 778		put_page(batch->pages[i]);
 779	batch->nr_pages = 0;
 780}
 781
 782static int gntdev_copy(struct gntdev_copy_batch *batch)
 783{
 784	unsigned int i;
 785
 786	gnttab_batch_copy(batch->ops, batch->nr_ops);
 787	gntdev_put_pages(batch);
 788
 789	/*
 790	 * For each completed op, update the status if the op failed
 791	 * and all previous ops for the segment were successful.
 792	 */
 793	for (i = 0; i < batch->nr_ops; i++) {
 794		s16 status = batch->ops[i].status;
 795		s16 old_status;
 796
 797		if (status == GNTST_okay)
 798			continue;
 799
 800		if (__get_user(old_status, batch->status[i]))
 801			return -EFAULT;
 802
 803		if (old_status != GNTST_okay)
 804			continue;
 805
 806		if (__put_user(status, batch->status[i]))
 807			return -EFAULT;
 808	}
 809
 810	batch->nr_ops = 0;
 811	return 0;
 812}
 813
 814static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
 815				 struct gntdev_grant_copy_segment *seg,
 816				 s16 __user *status)
 817{
 818	uint16_t copied = 0;
 819
 820	/*
 821	 * Disallow local -> local copies since there is only space in
 822	 * batch->pages for one page per-op and this would be a very
 823	 * expensive memcpy().
 824	 */
 825	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
 826		return -EINVAL;
 827
 828	/* Can't cross page if source/dest is a grant ref. */
 829	if (seg->flags & GNTCOPY_source_gref) {
 830		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
 831			return -EINVAL;
 832	}
 833	if (seg->flags & GNTCOPY_dest_gref) {
 834		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
 835			return -EINVAL;
 836	}
 837
 838	if (put_user(GNTST_okay, status))
 839		return -EFAULT;
 840
 841	while (copied < seg->len) {
 842		struct gnttab_copy *op;
 843		void __user *virt;
 844		size_t len, off;
 845		unsigned long gfn;
 846		int ret;
 847
 848		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
 849			ret = gntdev_copy(batch);
 850			if (ret < 0)
 851				return ret;
 852		}
 853
 854		len = seg->len - copied;
 855
 856		op = &batch->ops[batch->nr_ops];
 857		op->flags = 0;
 858
 859		if (seg->flags & GNTCOPY_source_gref) {
 860			op->source.u.ref = seg->source.foreign.ref;
 861			op->source.domid = seg->source.foreign.domid;
 862			op->source.offset = seg->source.foreign.offset + copied;
 863			op->flags |= GNTCOPY_source_gref;
 864		} else {
 865			virt = seg->source.virt + copied;
 866			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 867			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 868
 869			ret = gntdev_get_page(batch, virt, false, &gfn);
 870			if (ret < 0)
 871				return ret;
 872
 873			op->source.u.gmfn = gfn;
 874			op->source.domid = DOMID_SELF;
 875			op->source.offset = off;
 876		}
 877
 878		if (seg->flags & GNTCOPY_dest_gref) {
 879			op->dest.u.ref = seg->dest.foreign.ref;
 880			op->dest.domid = seg->dest.foreign.domid;
 881			op->dest.offset = seg->dest.foreign.offset + copied;
 882			op->flags |= GNTCOPY_dest_gref;
 883		} else {
 884			virt = seg->dest.virt + copied;
 885			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 886			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 887
 888			ret = gntdev_get_page(batch, virt, true, &gfn);
 889			if (ret < 0)
 890				return ret;
 891
 892			op->dest.u.gmfn = gfn;
 893			op->dest.domid = DOMID_SELF;
 894			op->dest.offset = off;
 895		}
 896
 897		op->len = len;
 898		copied += len;
 899
 900		batch->status[batch->nr_ops] = status;
 901		batch->nr_ops++;
 902	}
 903
 904	return 0;
 905}
 906
 907static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 908{
 909	struct ioctl_gntdev_grant_copy copy;
 910	struct gntdev_copy_batch batch;
 911	unsigned int i;
 912	int ret = 0;
 913
 914	if (copy_from_user(&copy, u, sizeof(copy)))
 915		return -EFAULT;
 916
 917	batch.nr_ops = 0;
 918	batch.nr_pages = 0;
 919
 920	for (i = 0; i < copy.count; i++) {
 921		struct gntdev_grant_copy_segment seg;
 922
 923		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
 924			ret = -EFAULT;
 925			goto out;
 926		}
 927
 928		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
 929		if (ret < 0)
 930			goto out;
 931
 932		cond_resched();
 933	}
 934	if (batch.nr_ops)
 935		ret = gntdev_copy(&batch);
 936	return ret;
 937
 938  out:
 939	gntdev_put_pages(&batch);
 940	return ret;
 941}
 942
 943static long gntdev_ioctl(struct file *flip,
 944			 unsigned int cmd, unsigned long arg)
 945{
 946	struct gntdev_priv *priv = flip->private_data;
 947	void __user *ptr = (void __user *)arg;
 948
 949	switch (cmd) {
 950	case IOCTL_GNTDEV_MAP_GRANT_REF:
 951		return gntdev_ioctl_map_grant_ref(priv, ptr);
 952
 953	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
 954		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
 955
 956	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
 957		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
 958
 959	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
 960		return gntdev_ioctl_notify(priv, ptr);
 961
 962	case IOCTL_GNTDEV_GRANT_COPY:
 963		return gntdev_ioctl_grant_copy(priv, ptr);
 964
 965	default:
 966		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
 967		return -ENOIOCTLCMD;
 968	}
 969
 970	return 0;
 971}
 972
 973static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 974{
 975	struct gntdev_priv *priv = flip->private_data;
 976	int index = vma->vm_pgoff;
 977	int count = vma_pages(vma);
 978	struct grant_map *map;
 979	int i, err = -EINVAL;
 980
 981	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
 982		return -EINVAL;
 983
 984	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
 985			index, count, vma->vm_start, vma->vm_pgoff);
 986
 987	mutex_lock(&priv->lock);
 988	map = gntdev_find_map_index(priv, index, count);
 989	if (!map)
 990		goto unlock_out;
 991	if (use_ptemod && map->vma)
 992		goto unlock_out;
 993	if (use_ptemod && priv->mm != vma->vm_mm) {
 994		pr_warn("Huh? Other mm?\n");
 995		goto unlock_out;
 996	}
 997
 998	refcount_inc(&map->users);
 999
1000	vma->vm_ops = &gntdev_vmops;
1001
1002	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
1003
1004	if (use_ptemod)
1005		vma->vm_flags |= VM_DONTCOPY;
1006
1007	vma->vm_private_data = map;
1008
1009	if (use_ptemod)
1010		map->vma = vma;
1011
1012	if (map->flags) {
1013		if ((vma->vm_flags & VM_WRITE) &&
1014				(map->flags & GNTMAP_readonly))
1015			goto out_unlock_put;
1016	} else {
1017		map->flags = GNTMAP_host_map;
1018		if (!(vma->vm_flags & VM_WRITE))
1019			map->flags |= GNTMAP_readonly;
1020	}
1021
1022	mutex_unlock(&priv->lock);
1023
1024	if (use_ptemod) {
1025		map->pages_vm_start = vma->vm_start;
1026		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1027					  vma->vm_end - vma->vm_start,
1028					  find_grant_ptes, map);
1029		if (err) {
1030			pr_warn("find_grant_ptes() failure.\n");
1031			goto out_put_map;
1032		}
1033	}
1034
1035	err = map_grant_pages(map);
1036	if (err)
1037		goto out_put_map;
1038
1039	if (!use_ptemod) {
1040		for (i = 0; i < count; i++) {
1041			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
1042				map->pages[i]);
1043			if (err)
1044				goto out_put_map;
1045		}
1046	} else {
1047#ifdef CONFIG_X86
1048		/*
1049		 * If the PTEs were not made special by the grant map
1050		 * hypercall, do so here.
1051		 *
1052		 * This is racy since the mapping is already visible
1053		 * to userspace but userspace should be well-behaved
1054		 * enough to not touch it until the mmap() call
1055		 * returns.
1056		 */
1057		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
1058			apply_to_page_range(vma->vm_mm, vma->vm_start,
1059					    vma->vm_end - vma->vm_start,
1060					    set_grant_ptes_as_special, NULL);
1061		}
1062#endif
1063	}
1064
1065	return 0;
1066
1067unlock_out:
1068	mutex_unlock(&priv->lock);
1069	return err;
1070
1071out_unlock_put:
1072	mutex_unlock(&priv->lock);
1073out_put_map:
1074	if (use_ptemod) {
1075		map->vma = NULL;
1076		unmap_grant_pages(map, 0, map->count);
1077	}
1078	gntdev_put_map(priv, map);
1079	return err;
1080}
1081
1082static const struct file_operations gntdev_fops = {
1083	.owner = THIS_MODULE,
1084	.open = gntdev_open,
1085	.release = gntdev_release,
1086	.mmap = gntdev_mmap,
1087	.unlocked_ioctl = gntdev_ioctl
1088};
1089
1090static struct miscdevice gntdev_miscdev = {
1091	.minor        = MISC_DYNAMIC_MINOR,
1092	.name         = "xen/gntdev",
1093	.fops         = &gntdev_fops,
1094};
1095
1096/* ------------------------------------------------------------------ */
1097
1098static int __init gntdev_init(void)
1099{
1100	int err;
1101
1102	if (!xen_domain())
1103		return -ENODEV;
1104
1105	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
1106
1107	err = misc_register(&gntdev_miscdev);
1108	if (err != 0) {
1109		pr_err("Could not register gntdev device\n");
1110		return err;
1111	}
1112	return 0;
1113}
1114
1115static void __exit gntdev_exit(void)
1116{
1117	misc_deregister(&gntdev_miscdev);
1118}
1119
1120module_init(gntdev_init);
1121module_exit(gntdev_exit);
1122
1123/* ------------------------------------------------------------------ */
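
A minimal userspace sketch of the map-and-mmap flow the v4.17 driver above implements. Illustrative only, not part of the kernel source: map_one_grant() is a hypothetical helper, the device node is typically /dev/xen/gntdev, a 4 KiB page size is assumed, and the domid/grant reference must be obtained out of band (e.g. via xenstore).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

/* Map a single foreign grant read-only and dump its first byte. */
static int map_one_grant(uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref op = {
		.count = 1,
		.refs[0] = { .domid = domid, .ref = ref },
	};
	int fd = open("/dev/xen/gntdev", O_RDWR);
	void *addr;

	if (fd < 0)
		return -1;
	/* gntdev_ioctl_map_grant_ref() returns the mmap offset in op.index */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op)) {
		close(fd);
		return -1;
	}
	addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, op.index);
	if (addr == MAP_FAILED) {
		close(fd);
		return -1;
	}
	printf("first byte: %u\n", *(volatile uint8_t *)addr);
	munmap(addr, 4096);
	close(fd);
	return 0;
}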
v3.15
  1/******************************************************************************
  2 * gntdev.c
  3 *
  4 * Device for accessing (in user-space) pages that have been granted by other
  5 * domains.
  6 *
  7 * Copyright (c) 2006-2007, D G Murray.
  8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
  9 *
 10 * This program is distributed in the hope that it will be useful,
 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 * GNU General Public License for more details.
 14 *
 15 * You should have received a copy of the GNU General Public License
 16 * along with this program; if not, write to the Free Software
 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 18 */
 19
 20#undef DEBUG
 21
 22#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 23
 24#include <linux/module.h>
 25#include <linux/kernel.h>
 26#include <linux/init.h>
 27#include <linux/miscdevice.h>
 28#include <linux/fs.h>
 29#include <linux/mm.h>
 30#include <linux/mman.h>
 31#include <linux/mmu_notifier.h>
 32#include <linux/types.h>
 33#include <linux/uaccess.h>
 34#include <linux/sched.h>
 35#include <linux/spinlock.h>
 36#include <linux/slab.h>
 37#include <linux/highmem.h>
 38
 39#include <xen/xen.h>
 40#include <xen/grant_table.h>
 41#include <xen/balloon.h>
 42#include <xen/gntdev.h>
 43#include <xen/events.h>
 44#include <asm/xen/hypervisor.h>
 45#include <asm/xen/hypercall.h>
 46#include <asm/xen/page.h>
 47
 48MODULE_LICENSE("GPL");
 49MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
 50	      "Gerd Hoffmann <kraxel@redhat.com>");
 51MODULE_DESCRIPTION("User-space granted page access driver");
 52
 53static int limit = 1024*1024;
 54module_param(limit, int, 0644);
 55MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 56		"the gntdev device");
 57
 58static atomic_t pages_mapped = ATOMIC_INIT(0);
 59
 60static int use_ptemod;
 61#define populate_freeable_maps use_ptemod
 62
 63struct gntdev_priv {
 64	/* maps with visible offsets in the file descriptor */
 65	struct list_head maps;
 66	/* maps that are not visible; will be freed on munmap.
 67	 * Only populated if populate_freeable_maps == 1 */
 68	struct list_head freeable_maps;
 69	/* lock protects maps and freeable_maps */
 70	spinlock_t lock;
 71	struct mm_struct *mm;
 72	struct mmu_notifier mn;
 73};
 74
 75struct unmap_notify {
 76	int flags;
 77	/* Address relative to the start of the grant_map */
 78	int addr;
 79	int event;
 80};
 81
 82struct grant_map {
 83	struct list_head next;
 84	struct vm_area_struct *vma;
 85	int index;
 86	int count;
 87	int flags;
 88	atomic_t users;
 89	struct unmap_notify notify;
 90	struct ioctl_gntdev_grant_ref *grants;
 91	struct gnttab_map_grant_ref   *map_ops;
 92	struct gnttab_unmap_grant_ref *unmap_ops;
 93	struct gnttab_map_grant_ref   *kmap_ops;
 94	struct page **pages;
 95};
 96
 97static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
 98
 99/* ------------------------------------------------------------------ */
100
101static void gntdev_print_maps(struct gntdev_priv *priv,
102			      char *text, int text_index)
103{
104#ifdef DEBUG
105	struct grant_map *map;
106
107	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
108	list_for_each_entry(map, &priv->maps, next)
109		pr_debug("  index %2d, count %2d %s\n",
110		       map->index, map->count,
111		       map->index == text_index && text ? text : "");
112#endif
113}
114
115static void gntdev_free_map(struct grant_map *map)
116{
117	if (map == NULL)
118		return;
119
120	if (map->pages)
121		free_xenballooned_pages(map->count, map->pages);
122	kfree(map->pages);
123	kfree(map->grants);
124	kfree(map->map_ops);
125	kfree(map->unmap_ops);
126	kfree(map->kmap_ops);
127	kfree(map);
128}
129
130static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
131{
132	struct grant_map *add;
133	int i;
134
135	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
136	if (NULL == add)
137		return NULL;
138
139	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
140	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
141	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
142	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
143	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
144	if (NULL == add->grants    ||
145	    NULL == add->map_ops   ||
146	    NULL == add->unmap_ops ||
147	    NULL == add->kmap_ops  ||
148	    NULL == add->pages)
149		goto err;
150
151	if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
152		goto err;
153
154	for (i = 0; i < count; i++) {
155		add->map_ops[i].handle = -1;
156		add->unmap_ops[i].handle = -1;
157		add->kmap_ops[i].handle = -1;
158	}
159
160	add->index = 0;
161	add->count = count;
162	atomic_set(&add->users, 1);
163
164	return add;
165
166err:
167	gntdev_free_map(add);
168	return NULL;
169}
170
171static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
172{
173	struct grant_map *map;
174
175	list_for_each_entry(map, &priv->maps, next) {
176		if (add->index + add->count < map->index) {
177			list_add_tail(&add->next, &map->next);
178			goto done;
179		}
180		add->index = map->index + map->count;
181	}
182	list_add_tail(&add->next, &priv->maps);
183
184done:
185	gntdev_print_maps(priv, "[new]", add->index);
186}
187
188static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
189		int index, int count)
190{
191	struct grant_map *map;
192
193	list_for_each_entry(map, &priv->maps, next) {
194		if (map->index != index)
195			continue;
196		if (count && map->count != count)
197			continue;
198		return map;
199	}
200	return NULL;
201}
202
203static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
204{
205	if (!map)
206		return;
207
208	if (!atomic_dec_and_test(&map->users))
209		return;
210
211	atomic_sub(map->count, &pages_mapped);
212
213	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
214		notify_remote_via_evtchn(map->notify.event);
215		evtchn_put(map->notify.event);
216	}
217
218	if (populate_freeable_maps && priv) {
219		spin_lock(&priv->lock);
220		list_del(&map->next);
221		spin_unlock(&priv->lock);
222	}
223
224	if (map->pages && !use_ptemod)
225		unmap_grant_pages(map, 0, map->count);
226	gntdev_free_map(map);
227}
228
229/* ------------------------------------------------------------------ */
230
231static int find_grant_ptes(pte_t *pte, pgtable_t token,
232		unsigned long addr, void *data)
233{
234	struct grant_map *map = data;
235	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
236	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
237	u64 pte_maddr;
238
239	BUG_ON(pgnr >= map->count);
240	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
241
242	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
243			  map->grants[pgnr].ref,
244			  map->grants[pgnr].domid);
245	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
246			    -1 /* handle */);
247	return 0;
248}
249
250static int map_grant_pages(struct grant_map *map)
251{
252	int i, err = 0;
253
254	if (!use_ptemod) {
255		/* Note: it could already be mapped */
256		if (map->map_ops[0].handle != -1)
257			return 0;
258		for (i = 0; i < map->count; i++) {
259			unsigned long addr = (unsigned long)
260				pfn_to_kaddr(page_to_pfn(map->pages[i]));
261			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
262				map->grants[i].ref,
263				map->grants[i].domid);
264			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
265				map->flags, -1 /* handle */);
266		}
267	} else {
268		/*
 269		 * Set up the map_ops corresponding to the pte entries pointing
 270		 * to the kernel linear addresses of the struct pages.
 271		 * These ptes are completely different from the user ptes dealt
 272		 * with by find_grant_ptes.
273		 */
274		for (i = 0; i < map->count; i++) {
275			unsigned long address = (unsigned long)
276				pfn_to_kaddr(page_to_pfn(map->pages[i]));
277			BUG_ON(PageHighMem(map->pages[i]));
278
279			gnttab_set_map_op(&map->kmap_ops[i], address,
280				map->flags | GNTMAP_host_map,
281				map->grants[i].ref,
282				map->grants[i].domid);
283		}
284	}
285
286	pr_debug("map %d+%d\n", map->index, map->count);
287	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
288			map->pages, map->count);
289	if (err)
290		return err;
291
292	for (i = 0; i < map->count; i++) {
293		if (map->map_ops[i].status)
294			err = -EINVAL;
295		else {
296			BUG_ON(map->map_ops[i].handle == -1);
297			map->unmap_ops[i].handle = map->map_ops[i].handle;
298			pr_debug("map handle=%d\n", map->map_ops[i].handle);
299		}
300	}
301	return err;
302}
303
304static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
305{
306	int i, err = 0;
307
308	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
309		int pgno = (map->notify.addr >> PAGE_SHIFT);
310		if (pgno >= offset && pgno < offset + pages) {
311			/* No need for kmap, pages are in lowmem */
312			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
313			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
314			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
315		}
316	}
317
318	err = gnttab_unmap_refs(map->unmap_ops + offset,
319			use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
320			pages);
321	if (err)
322		return err;
323
324	for (i = 0; i < pages; i++) {
325		if (map->unmap_ops[offset+i].status)
326			err = -EINVAL;
327		pr_debug("unmap handle=%d st=%d\n",
328			map->unmap_ops[offset+i].handle,
329			map->unmap_ops[offset+i].status);
330		map->unmap_ops[offset+i].handle = -1;
331	}
332	return err;
333}
334
335static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
336{
337	int range, err = 0;
338
339	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
340
341	/* It is possible the requested range will have a "hole" where we
342	 * already unmapped some of the grants. Only unmap valid ranges.
343	 */
344	while (pages && !err) {
345		while (pages && map->unmap_ops[offset].handle == -1) {
346			offset++;
347			pages--;
348		}
349		range = 0;
350		while (range < pages) {
351			if (map->unmap_ops[offset+range].handle == -1) {
352				range--;
353				break;
354			}
355			range++;
356		}
357		err = __unmap_grant_pages(map, offset, range);
358		offset += range;
359		pages -= range;
360	}
361
362	return err;
363}
364
365/* ------------------------------------------------------------------ */
366
367static void gntdev_vma_open(struct vm_area_struct *vma)
368{
369	struct grant_map *map = vma->vm_private_data;
370
371	pr_debug("gntdev_vma_open %p\n", vma);
372	atomic_inc(&map->users);
373}
374
375static void gntdev_vma_close(struct vm_area_struct *vma)
376{
377	struct grant_map *map = vma->vm_private_data;
378	struct file *file = vma->vm_file;
379	struct gntdev_priv *priv = file->private_data;
380
381	pr_debug("gntdev_vma_close %p\n", vma);
382	if (use_ptemod) {
383		/* It is possible that an mmu notifier could be running
 384		 * concurrently, so take priv->lock to ensure that the vma won't
 385		 * vanish during the unmap_grant_pages call, since we will
386		 * spin here until that completes. Such a concurrent call will
387		 * not do any unmapping, since that has been done prior to
388		 * closing the vma, but it may still iterate the unmap_ops list.
389		 */
390		spin_lock(&priv->lock);
391		map->vma = NULL;
392		spin_unlock(&priv->lock);
393	}
394	vma->vm_private_data = NULL;
395	gntdev_put_map(priv, map);
396}
397
398static struct vm_operations_struct gntdev_vmops = {
399	.open = gntdev_vma_open,
400	.close = gntdev_vma_close,
401};
402
403/* ------------------------------------------------------------------ */
404
405static void unmap_if_in_range(struct grant_map *map,
406			      unsigned long start, unsigned long end)
407{
408	unsigned long mstart, mend;
409	int err;
410
411	if (!map->vma)
412		return;
413	if (map->vma->vm_start >= end)
414		return;
415	if (map->vma->vm_end <= start)
416		return;
417	mstart = max(start, map->vma->vm_start);
418	mend   = min(end,   map->vma->vm_end);
419	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
420			map->index, map->count,
421			map->vma->vm_start, map->vma->vm_end,
422			start, end, mstart, mend);
423	err = unmap_grant_pages(map,
424				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
425				(mend - mstart) >> PAGE_SHIFT);
426	WARN_ON(err);
427}
428
429static void mn_invl_range_start(struct mmu_notifier *mn,
430				struct mm_struct *mm,
431				unsigned long start, unsigned long end)
432{
433	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
434	struct grant_map *map;
435
436	spin_lock(&priv->lock);
437	list_for_each_entry(map, &priv->maps, next) {
438		unmap_if_in_range(map, start, end);
439	}
440	list_for_each_entry(map, &priv->freeable_maps, next) {
441		unmap_if_in_range(map, start, end);
442	}
443	spin_unlock(&priv->lock);
444}
445
446static void mn_invl_page(struct mmu_notifier *mn,
447			 struct mm_struct *mm,
448			 unsigned long address)
449{
450	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
451}
452
453static void mn_release(struct mmu_notifier *mn,
454		       struct mm_struct *mm)
455{
456	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
457	struct grant_map *map;
458	int err;
459
460	spin_lock(&priv->lock);
461	list_for_each_entry(map, &priv->maps, next) {
462		if (!map->vma)
463			continue;
464		pr_debug("map %d+%d (%lx %lx)\n",
465				map->index, map->count,
466				map->vma->vm_start, map->vma->vm_end);
467		err = unmap_grant_pages(map, /* offset */ 0, map->count);
468		WARN_ON(err);
469	}
470	list_for_each_entry(map, &priv->freeable_maps, next) {
471		if (!map->vma)
472			continue;
473		pr_debug("map %d+%d (%lx %lx)\n",
474				map->index, map->count,
475				map->vma->vm_start, map->vma->vm_end);
476		err = unmap_grant_pages(map, /* offset */ 0, map->count);
477		WARN_ON(err);
478	}
479	spin_unlock(&priv->lock);
480}
481
482static struct mmu_notifier_ops gntdev_mmu_ops = {
483	.release                = mn_release,
484	.invalidate_page        = mn_invl_page,
485	.invalidate_range_start = mn_invl_range_start,
486};
487
488/* ------------------------------------------------------------------ */
489
490static int gntdev_open(struct inode *inode, struct file *flip)
491{
492	struct gntdev_priv *priv;
493	int ret = 0;
494
495	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
496	if (!priv)
497		return -ENOMEM;
498
499	INIT_LIST_HEAD(&priv->maps);
500	INIT_LIST_HEAD(&priv->freeable_maps);
501	spin_lock_init(&priv->lock);
502
503	if (use_ptemod) {
504		priv->mm = get_task_mm(current);
505		if (!priv->mm) {
506			kfree(priv);
507			return -ENOMEM;
508		}
509		priv->mn.ops = &gntdev_mmu_ops;
510		ret = mmu_notifier_register(&priv->mn, priv->mm);
511		mmput(priv->mm);
512	}
513
514	if (ret) {
515		kfree(priv);
516		return ret;
517	}
518
519	flip->private_data = priv;
520	pr_debug("priv %p\n", priv);
521
522	return 0;
523}
524
525static int gntdev_release(struct inode *inode, struct file *flip)
526{
527	struct gntdev_priv *priv = flip->private_data;
528	struct grant_map *map;
529
530	pr_debug("priv %p\n", priv);
531
532	while (!list_empty(&priv->maps)) {
533		map = list_entry(priv->maps.next, struct grant_map, next);
534		list_del(&map->next);
535		gntdev_put_map(NULL /* already removed */, map);
536	}
537	WARN_ON(!list_empty(&priv->freeable_maps));
538
539	if (use_ptemod)
540		mmu_notifier_unregister(&priv->mn, priv->mm);
541	kfree(priv);
542	return 0;
543}
544
545static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
546				       struct ioctl_gntdev_map_grant_ref __user *u)
547{
548	struct ioctl_gntdev_map_grant_ref op;
549	struct grant_map *map;
550	int err;
551
552	if (copy_from_user(&op, u, sizeof(op)) != 0)
553		return -EFAULT;
554	pr_debug("priv %p, add %d\n", priv, op.count);
555	if (unlikely(op.count <= 0))
556		return -EINVAL;
557
558	err = -ENOMEM;
559	map = gntdev_alloc_map(priv, op.count);
560	if (!map)
561		return err;
562
563	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
564		pr_debug("can't map: over limit\n");
565		gntdev_put_map(NULL, map);
566		return err;
567	}
568
569	if (copy_from_user(map->grants, &u->refs,
570			   sizeof(map->grants[0]) * op.count) != 0) {
571		gntdev_put_map(NULL, map);
572		return -EFAULT;
573	}
574
575	spin_lock(&priv->lock);
576	gntdev_add_map(priv, map);
577	op.index = map->index << PAGE_SHIFT;
578	spin_unlock(&priv->lock);
579
580	if (copy_to_user(u, &op, sizeof(op)) != 0)
581		return -EFAULT;
582
583	return 0;
584}
585
586static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
587					 struct ioctl_gntdev_unmap_grant_ref __user *u)
588{
589	struct ioctl_gntdev_unmap_grant_ref op;
590	struct grant_map *map;
591	int err = -ENOENT;
592
593	if (copy_from_user(&op, u, sizeof(op)) != 0)
594		return -EFAULT;
595	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
596
597	spin_lock(&priv->lock);
598	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
599	if (map) {
600		list_del(&map->next);
601		if (populate_freeable_maps)
602			list_add_tail(&map->next, &priv->freeable_maps);
603		err = 0;
604	}
605	spin_unlock(&priv->lock);
606	if (map)
607		gntdev_put_map(priv, map);
608	return err;
609}
610
611static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
612					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
613{
614	struct ioctl_gntdev_get_offset_for_vaddr op;
615	struct vm_area_struct *vma;
616	struct grant_map *map;
617	int rv = -EINVAL;
618
619	if (copy_from_user(&op, u, sizeof(op)) != 0)
620		return -EFAULT;
621	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
622
623	down_read(&current->mm->mmap_sem);
624	vma = find_vma(current->mm, op.vaddr);
625	if (!vma || vma->vm_ops != &gntdev_vmops)
626		goto out_unlock;
627
628	map = vma->vm_private_data;
629	if (!map)
630		goto out_unlock;
631
632	op.offset = map->index << PAGE_SHIFT;
633	op.count = map->count;
634	rv = 0;
635
636 out_unlock:
637	up_read(&current->mm->mmap_sem);
638
639	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
640		return -EFAULT;
641	return rv;
642}
643
644static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
645{
646	struct ioctl_gntdev_unmap_notify op;
647	struct grant_map *map;
648	int rc;
649	int out_flags;
650	unsigned int out_event;
651
652	if (copy_from_user(&op, u, sizeof(op)))
653		return -EFAULT;
654
655	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
656		return -EINVAL;
657
658	/* We need to grab a reference to the event channel we are going to use
659	 * to send the notify before releasing the reference we may already have
660	 * (if someone has called this ioctl twice). This is required so that
661	 * it is possible to change the clear_byte part of the notification
662	 * without disturbing the event channel part, which may now be the last
663	 * reference to that event channel.
664	 */
665	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
666		if (evtchn_get(op.event_channel_port))
667			return -EINVAL;
668	}
669
670	out_flags = op.action;
671	out_event = op.event_channel_port;
672
673	spin_lock(&priv->lock);
674
675	list_for_each_entry(map, &priv->maps, next) {
676		uint64_t begin = map->index << PAGE_SHIFT;
677		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
678		if (op.index >= begin && op.index < end)
679			goto found;
680	}
681	rc = -ENOENT;
682	goto unlock_out;
683
684 found:
685	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
686			(map->flags & GNTMAP_readonly)) {
687		rc = -EINVAL;
688		goto unlock_out;
689	}
690
691	out_flags = map->notify.flags;
692	out_event = map->notify.event;
693
694	map->notify.flags = op.action;
695	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
696	map->notify.event = op.event_channel_port;
697
698	rc = 0;
699
700 unlock_out:
701	spin_unlock(&priv->lock);
702
703	/* Drop the reference to the event channel we did not save in the map */
704	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
705		evtchn_put(out_event);
706
707	return rc;
708}
709
710static long gntdev_ioctl(struct file *flip,
711			 unsigned int cmd, unsigned long arg)
712{
713	struct gntdev_priv *priv = flip->private_data;
714	void __user *ptr = (void __user *)arg;
715
716	switch (cmd) {
717	case IOCTL_GNTDEV_MAP_GRANT_REF:
718		return gntdev_ioctl_map_grant_ref(priv, ptr);
719
720	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
721		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
722
723	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
724		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
725
726	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
727		return gntdev_ioctl_notify(priv, ptr);
728
729	default:
730		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
731		return -ENOIOCTLCMD;
732	}
733
734	return 0;
735}
736
737static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
738{
739	struct gntdev_priv *priv = flip->private_data;
740	int index = vma->vm_pgoff;
741	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
742	struct grant_map *map;
743	int i, err = -EINVAL;
744
745	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
746		return -EINVAL;
747
748	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
749			index, count, vma->vm_start, vma->vm_pgoff);
750
751	spin_lock(&priv->lock);
752	map = gntdev_find_map_index(priv, index, count);
753	if (!map)
754		goto unlock_out;
755	if (use_ptemod && map->vma)
756		goto unlock_out;
757	if (use_ptemod && priv->mm != vma->vm_mm) {
758		pr_warn("Huh? Other mm?\n");
759		goto unlock_out;
760	}
761
762	atomic_inc(&map->users);
763
764	vma->vm_ops = &gntdev_vmops;
765
766	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
767
768	if (use_ptemod)
769		vma->vm_flags |= VM_DONTCOPY;
770
771	vma->vm_private_data = map;
772
773	if (use_ptemod)
774		map->vma = vma;
775
776	if (map->flags) {
777		if ((vma->vm_flags & VM_WRITE) &&
778				(map->flags & GNTMAP_readonly))
779			goto out_unlock_put;
780	} else {
781		map->flags = GNTMAP_host_map;
782		if (!(vma->vm_flags & VM_WRITE))
783			map->flags |= GNTMAP_readonly;
784	}
785
786	spin_unlock(&priv->lock);
787
788	if (use_ptemod) {
789		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
790					  vma->vm_end - vma->vm_start,
791					  find_grant_ptes, map);
792		if (err) {
793			pr_warn("find_grant_ptes() failure.\n");
794			goto out_put_map;
795		}
796	}
797
798	err = map_grant_pages(map);
799	if (err)
800		goto out_put_map;
801
802	if (!use_ptemod) {
803		for (i = 0; i < count; i++) {
804			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
805				map->pages[i]);
806			if (err)
807				goto out_put_map;
808		}
809	}
810
811	return 0;
812
813unlock_out:
814	spin_unlock(&priv->lock);
815	return err;
816
817out_unlock_put:
818	spin_unlock(&priv->lock);
819out_put_map:
820	if (use_ptemod)
821		map->vma = NULL;
822	gntdev_put_map(priv, map);
823	return err;
824}
825
826static const struct file_operations gntdev_fops = {
827	.owner = THIS_MODULE,
828	.open = gntdev_open,
829	.release = gntdev_release,
830	.mmap = gntdev_mmap,
831	.unlocked_ioctl = gntdev_ioctl
832};
833
834static struct miscdevice gntdev_miscdev = {
835	.minor        = MISC_DYNAMIC_MINOR,
836	.name         = "xen/gntdev",
837	.fops         = &gntdev_fops,
838};
839
840/* ------------------------------------------------------------------ */
841
842static int __init gntdev_init(void)
843{
844	int err;
845
846	if (!xen_domain())
847		return -ENODEV;
848
849	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
850
851	err = misc_register(&gntdev_miscdev);
852	if (err != 0) {
853		pr_err("Could not register gntdev device\n");
854		return err;
855	}
856	return 0;
857}
858
859static void __exit gntdev_exit(void)
860{
861	misc_deregister(&gntdev_miscdev);
862}
863
864module_init(gntdev_init);
865module_exit(gntdev_exit);
866
867/* ------------------------------------------------------------------ */