v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/******************************************************************************
   3 * privcmd.c
   4 *
   5 * Interface to privileged domain-0 commands.
   6 *
   7 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
   8 */
   9
  10#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  11
  12#include <linux/eventfd.h>
  13#include <linux/file.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/mutex.h>
  17#include <linux/poll.h>
  18#include <linux/sched.h>
  19#include <linux/slab.h>
  20#include <linux/string.h>
  21#include <linux/workqueue.h>
  22#include <linux/errno.h>
  23#include <linux/mm.h>
  24#include <linux/mman.h>
  25#include <linux/uaccess.h>
  26#include <linux/swap.h>
  27#include <linux/highmem.h>
  28#include <linux/pagemap.h>
  29#include <linux/seq_file.h>
  30#include <linux/miscdevice.h>
  31#include <linux/moduleparam.h>
  32#include <linux/virtio_mmio.h>
  33
  34#include <asm/xen/hypervisor.h>
  35#include <asm/xen/hypercall.h>
  36
  37#include <xen/xen.h>
  38#include <xen/events.h>
  39#include <xen/privcmd.h>
  40#include <xen/interface/xen.h>
  41#include <xen/interface/memory.h>
  42#include <xen/interface/hvm/dm_op.h>
  43#include <xen/interface/hvm/ioreq.h>
  44#include <xen/features.h>
  45#include <xen/page.h>
  46#include <xen/xen-ops.h>
  47#include <xen/balloon.h>
  48
  49#include "privcmd.h"
  50
  51MODULE_LICENSE("GPL");
  52
  53#define PRIV_VMA_LOCKED ((void *)1)
  54
  55static unsigned int privcmd_dm_op_max_num = 16;
  56module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
  57MODULE_PARM_DESC(dm_op_max_nr_bufs,
  58		 "Maximum number of buffers per dm_op hypercall");
  59
  60static unsigned int privcmd_dm_op_buf_max_size = 4096;
  61module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
  62		   0644);
  63MODULE_PARM_DESC(dm_op_buf_max_size,
  64		 "Maximum size of a dm_op hypercall buffer");
  65
  66struct privcmd_data {
  67	domid_t domid;
  68};
  69
  70static int privcmd_vma_range_is_mapped(
  71               struct vm_area_struct *vma,
  72               unsigned long addr,
  73               unsigned long nr_pages);
  74
  75static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
  76{
  77	struct privcmd_data *data = file->private_data;
  78	struct privcmd_hypercall hypercall;
  79	long ret;
  80
  81	/* Disallow arbitrary hypercalls if restricted */
  82	if (data->domid != DOMID_INVALID)
  83		return -EPERM;
  84
  85	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
  86		return -EFAULT;
  87
  88	xen_preemptible_hcall_begin();
  89	ret = privcmd_call(hypercall.op,
  90			   hypercall.arg[0], hypercall.arg[1],
  91			   hypercall.arg[2], hypercall.arg[3],
  92			   hypercall.arg[4]);
  93	xen_preemptible_hcall_end();
  94
  95	return ret;
  96}
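/*
 * A minimal user-space sketch of this path (assumed names: fd; error
 * handling omitted). IOCTL_PRIVCMD_HYPERCALL and struct privcmd_hypercall
 * come from xen/privcmd.h; the device node is /dev/xen/privcmd:
 *
 *	struct privcmd_hypercall call = {
 *		.op = __HYPERVISOR_xen_version,
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * Once the fd has been restricted with IOCTL_PRIVCMD_RESTRICT, this path
 * fails with -EPERM, as checked above.
 */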
  97
  98static void free_page_list(struct list_head *pages)
  99{
 100	struct page *p, *n;
 101
 102	list_for_each_entry_safe(p, n, pages, lru)
 103		__free_page(p);
 104
 105	INIT_LIST_HEAD(pages);
 106}
 107
 108/*
 109 * Given an array of items in userspace, return a list of pages
 110 * containing the data.  If copying fails, either because of memory
 111 * allocation failure or a problem reading user memory, return an
 112 * error code; its up to the caller to dispose of any partial list.
 113 */
 114static int gather_array(struct list_head *pagelist,
 115			unsigned nelem, size_t size,
 116			const void __user *data)
 117{
 118	unsigned pageidx;
 119	void *pagedata;
 120	int ret;
 121
 122	if (size > PAGE_SIZE)
 123		return 0;
 124
 125	pageidx = PAGE_SIZE;
 126	pagedata = NULL;	/* quiet, gcc */
 127	while (nelem--) {
 128		if (pageidx > PAGE_SIZE-size) {
 129			struct page *page = alloc_page(GFP_KERNEL);
 130
 131			ret = -ENOMEM;
 132			if (page == NULL)
 133				goto fail;
 134
 135			pagedata = page_address(page);
 136
 137			list_add_tail(&page->lru, pagelist);
 138			pageidx = 0;
 139		}
 140
 141		ret = -EFAULT;
 142		if (copy_from_user(pagedata + pageidx, data, size))
 143			goto fail;
 144
 145		data += size;
 146		pageidx += size;
 147	}
 148
 149	ret = 0;
 150
 151fail:
 152	return ret;
 153}
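/*
 * gather_array() and the traverse_pages*() helpers below form a simple
 * chunked pipeline: user data is copied into a page list holding at most
 * PAGE_SIZE/size elements per page, then walked either element by element
 * or a page-sized block at a time. For example, with 4 KiB pages an array
 * of 1024 xen_pfn_t entries (8 bytes each) spans exactly two pages.
 */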
 154
 155/*
 156 * Call function "fn" on each element of the array fragmented
 157 * over a list of pages.
 158 */
 159static int traverse_pages(unsigned nelem, size_t size,
 160			  struct list_head *pos,
 161			  int (*fn)(void *data, void *state),
 162			  void *state)
 163{
 164	void *pagedata;
 165	unsigned pageidx;
 166	int ret = 0;
 167
 168	BUG_ON(size > PAGE_SIZE);
 169
 170	pageidx = PAGE_SIZE;
 171	pagedata = NULL;	/* hush, gcc */
 172
 173	while (nelem--) {
 174		if (pageidx > PAGE_SIZE-size) {
 175			struct page *page;
 176			pos = pos->next;
 177			page = list_entry(pos, struct page, lru);
 178			pagedata = page_address(page);
 179			pageidx = 0;
 180		}
 181
 182		ret = (*fn)(pagedata + pageidx, state);
 183		if (ret)
 184			break;
 185		pageidx += size;
 186	}
 187
 188	return ret;
 189}
 190
 191/*
 192 * Similar to traverse_pages, but use each page as a "block" of
 193 * data to be processed as one unit.
 194 */
 195static int traverse_pages_block(unsigned nelem, size_t size,
 196				struct list_head *pos,
 197				int (*fn)(void *data, int nr, void *state),
 198				void *state)
 199{
 200	void *pagedata;
 201	int ret = 0;
 202
 203	BUG_ON(size > PAGE_SIZE);
 204
 205	while (nelem) {
 206		int nr = (PAGE_SIZE/size);
 207		struct page *page;
 208		if (nr > nelem)
 209			nr = nelem;
 210		pos = pos->next;
 211		page = list_entry(pos, struct page, lru);
 212		pagedata = page_address(page);
 213		ret = (*fn)(pagedata, nr, state);
 214		if (ret)
 215			break;
 216		nelem -= nr;
 217	}
 218
 219	return ret;
 220}
 221
 222struct mmap_gfn_state {
 223	unsigned long va;
 224	struct vm_area_struct *vma;
 225	domid_t domain;
 226};
 227
 228static int mmap_gfn_range(void *data, void *state)
 229{
 230	struct privcmd_mmap_entry *msg = data;
 231	struct mmap_gfn_state *st = state;
 232	struct vm_area_struct *vma = st->vma;
 233	int rc;
 234
 235	/* Do not allow range to wrap the address space. */
 236	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
 237	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
 238		return -EINVAL;
 239
 240	/* Range chunks must be contiguous in va space. */
 241	if ((msg->va != st->va) ||
 242	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 243		return -EINVAL;
 244
 245	rc = xen_remap_domain_gfn_range(vma,
 246					msg->va & PAGE_MASK,
 247					msg->mfn, msg->npages,
 248					vma->vm_page_prot,
 249					st->domain, NULL);
 250	if (rc < 0)
 251		return rc;
 252
 253	st->va += msg->npages << PAGE_SHIFT;
 254
 255	return 0;
 256}
 257
 258static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
 259{
 260	struct privcmd_data *data = file->private_data;
 261	struct privcmd_mmap mmapcmd;
 262	struct mm_struct *mm = current->mm;
 263	struct vm_area_struct *vma;
 264	int rc;
 265	LIST_HEAD(pagelist);
 266	struct mmap_gfn_state state;
 267
 268	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
 269	if (xen_feature(XENFEAT_auto_translated_physmap))
 270		return -ENOSYS;
 271
 272	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
 273		return -EFAULT;
 274
 275	/* If restriction is in place, check the domid matches */
 276	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
 277		return -EPERM;
 278
 279	rc = gather_array(&pagelist,
 280			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 281			  mmapcmd.entry);
 282
 283	if (rc || list_empty(&pagelist))
 284		goto out;
 285
 286	mmap_write_lock(mm);
 287
 288	{
 289		struct page *page = list_first_entry(&pagelist,
 290						     struct page, lru);
 291		struct privcmd_mmap_entry *msg = page_address(page);
 292
 293		vma = vma_lookup(mm, msg->va);
 294		rc = -EINVAL;
 295
 296		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
 297			goto out_up;
 298		vma->vm_private_data = PRIV_VMA_LOCKED;
 299	}
 300
 301	state.va = vma->vm_start;
 302	state.vma = vma;
 303	state.domain = mmapcmd.dom;
 304
 305	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 306			    &pagelist,
 307			    mmap_gfn_range, &state);
 308
 309
 310out_up:
 311	mmap_write_unlock(mm);
 312
 313out:
 314	free_page_list(&pagelist);
 315
 316	return rc;
 317}
 318
 319struct mmap_batch_state {
 320	domid_t domain;
 321	unsigned long va;
 322	struct vm_area_struct *vma;
 323	int index;
 324	/* A tristate:
 325	 *      0 for no errors
 326	 *      1 if at least one error has happened (and no
 327	 *          -ENOENT errors have happened)
 328	 *      -ENOENT if at least 1 -ENOENT has happened.
 329	 */
 330	int global_error;
 331	int version;
 332
 333	/* User-space gfn array to store errors in the second pass for V1. */
 334	xen_pfn_t __user *user_gfn;
 335	/* User-space int array to store errors in the second pass for V2. */
 336	int __user *user_err;
 337};
 338
 339/* auto translated dom0 note: if domU being created is PV, then gfn is
 340 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 341 */
 342static int mmap_batch_fn(void *data, int nr, void *state)
 343{
 344	xen_pfn_t *gfnp = data;
 345	struct mmap_batch_state *st = state;
 346	struct vm_area_struct *vma = st->vma;
 347	struct page **pages = vma->vm_private_data;
 348	struct page **cur_pages = NULL;
 349	int ret;
 350
 351	if (xen_feature(XENFEAT_auto_translated_physmap))
 352		cur_pages = &pages[st->index];
 353
 354	BUG_ON(nr < 0);
 355	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
 356					 (int *)gfnp, st->vma->vm_page_prot,
 357					 st->domain, cur_pages);
 358
 359	/* Adjust the global_error? */
 360	if (ret != nr) {
 361		if (ret == -ENOENT)
 362			st->global_error = -ENOENT;
 363		else {
 364			/* Record that at least one error has happened. */
 365			if (st->global_error == 0)
 366				st->global_error = 1;
 367		}
 368	}
 369	st->va += XEN_PAGE_SIZE * nr;
 370	st->index += nr / XEN_PFN_PER_PAGE;
 371
 372	return 0;
 373}
 374
 375static int mmap_return_error(int err, struct mmap_batch_state *st)
 376{
 377	int ret;
 378
 379	if (st->version == 1) {
 380		if (err) {
 381			xen_pfn_t gfn;
 382
 383			ret = get_user(gfn, st->user_gfn);
 384			if (ret < 0)
 385				return ret;
 386			/*
 387			 * V1 encodes the error codes in the 32bit top
 388			 * nibble of the gfn (with its known
 389			 * limitations vis-a-vis 64 bit callers).
 390			 */
 391			gfn |= (err == -ENOENT) ?
 392				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 393				PRIVCMD_MMAPBATCH_MFN_ERROR;
 394			return __put_user(gfn, st->user_gfn++);
 395		} else
 396			st->user_gfn++;
 397	} else { /* st->version == 2 */
 398		if (err)
 399			return __put_user(err, st->user_err++);
 400		else
 401			st->user_err++;
 402	}
 403
 404	return 0;
 405}
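/*
 * V1 callers must decode the error nibble themselves: a frame that the
 * hypervisor had paged out comes back as
 * (gfn | PRIVCMD_MMAPBATCH_PAGED_ERROR) and may be retried once the top
 * bits are masked off, while PRIVCMD_MMAPBATCH_MFN_ERROR marks a frame
 * that failed outright.
 */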
 406
 407static int mmap_return_errors(void *data, int nr, void *state)
 408{
 409	struct mmap_batch_state *st = state;
 410	int *errs = data;
 411	int i;
 412	int ret;
 413
 414	for (i = 0; i < nr; i++) {
 415		ret = mmap_return_error(errs[i], st);
 416		if (ret < 0)
 417			return ret;
 418	}
 419	return 0;
 420}
 421
 422/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 423 * the vma with the page info to use later.
  424 * Returns: 0 on success, otherwise -errno
 425 */
 426static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 427{
 428	int rc;
 429	struct page **pages;
 430
 431	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
 432	if (pages == NULL)
 433		return -ENOMEM;
 434
 435	rc = xen_alloc_unpopulated_pages(numpgs, pages);
 436	if (rc != 0) {
 437		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
 438			numpgs, rc);
 439		kvfree(pages);
 440		return -ENOMEM;
 441	}
 442	BUG_ON(vma->vm_private_data != NULL);
 443	vma->vm_private_data = pages;
 444
 445	return 0;
 446}
 447
 448static const struct vm_operations_struct privcmd_vm_ops;
 449
 450static long privcmd_ioctl_mmap_batch(
 451	struct file *file, void __user *udata, int version)
 452{
 453	struct privcmd_data *data = file->private_data;
 454	int ret;
 455	struct privcmd_mmapbatch_v2 m;
 456	struct mm_struct *mm = current->mm;
 457	struct vm_area_struct *vma;
 458	unsigned long nr_pages;
 459	LIST_HEAD(pagelist);
 460	struct mmap_batch_state state;
 461
 462	switch (version) {
 463	case 1:
 464		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
 465			return -EFAULT;
 466		/* Returns per-frame error in m.arr. */
 467		m.err = NULL;
 468		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
 469			return -EFAULT;
 470		break;
 471	case 2:
 472		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
 473			return -EFAULT;
 474		/* Returns per-frame error code in m.err. */
 475		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
 476			return -EFAULT;
 477		break;
 478	default:
 479		return -EINVAL;
 480	}
 481
 482	/* If restriction is in place, check the domid matches */
 483	if (data->domid != DOMID_INVALID && data->domid != m.dom)
 484		return -EPERM;
 485
 486	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
 487	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
 488		return -EINVAL;
 489
 490	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
 491
 492	if (ret)
 493		goto out;
 494	if (list_empty(&pagelist)) {
 495		ret = -EINVAL;
 496		goto out;
 497	}
 498
 499	if (version == 2) {
 500		/* Zero error array now to only copy back actual errors. */
 501		if (clear_user(m.err, sizeof(int) * m.num)) {
 502			ret = -EFAULT;
 503			goto out;
 504		}
 505	}
 506
 507	mmap_write_lock(mm);
 508
 509	vma = find_vma(mm, m.addr);
 510	if (!vma ||
 511	    vma->vm_ops != &privcmd_vm_ops) {
 512		ret = -EINVAL;
 513		goto out_unlock;
 514	}
 515
 516	/*
 517	 * Caller must either:
 518	 *
 519	 * Map the whole VMA range, which will also allocate all the
 520	 * pages required for the auto_translated_physmap case.
 521	 *
 522	 * Or
 523	 *
 524	 * Map unmapped holes left from a previous map attempt (e.g.,
 525	 * because those foreign frames were previously paged out).
 526	 */
 527	if (vma->vm_private_data == NULL) {
 528		if (m.addr != vma->vm_start ||
 529		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
 530			ret = -EINVAL;
 531			goto out_unlock;
 532		}
 533		if (xen_feature(XENFEAT_auto_translated_physmap)) {
 534			ret = alloc_empty_pages(vma, nr_pages);
 535			if (ret < 0)
 536				goto out_unlock;
 537		} else
 538			vma->vm_private_data = PRIV_VMA_LOCKED;
 539	} else {
 540		if (m.addr < vma->vm_start ||
 541		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
 542			ret = -EINVAL;
 543			goto out_unlock;
 544		}
 545		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
 546			ret = -EINVAL;
 547			goto out_unlock;
 548		}
 549	}
 550
 551	state.domain        = m.dom;
 552	state.vma           = vma;
 553	state.va            = m.addr;
 554	state.index         = 0;
 555	state.global_error  = 0;
 556	state.version       = version;
 557
 558	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
 559	/* mmap_batch_fn guarantees ret == 0 */
 560	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
 561				    &pagelist, mmap_batch_fn, &state));
 562
 563	mmap_write_unlock(mm);
 564
 565	if (state.global_error) {
 566		/* Write back errors in second pass. */
 567		state.user_gfn = (xen_pfn_t *)m.arr;
 568		state.user_err = m.err;
 569		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 570					   &pagelist, mmap_return_errors, &state);
 571	} else
 572		ret = 0;
 573
 574	/* If we have not had any EFAULT-like global errors then set the global
 575	 * error to -ENOENT if necessary. */
 576	if ((ret == 0) && (state.global_error == -ENOENT))
 577		ret = -ENOENT;
 578
 579out:
 580	free_page_list(&pagelist);
 581	return ret;
 582
 583out_unlock:
 584	mmap_write_unlock(mm);
 585	goto out;
 586}
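/*
 * A hedged V2 usage sketch (assumed names: fd, domid, gfn0/gfn1; addr must
 * come from an earlier mmap() of the same privcmd fd):
 *
 *	xen_pfn_t gfns[2] = { gfn0, gfn1 };
 *	int errs[2];
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = 2, .dom = domid, .addr = addr,
 *		.arr = gfns, .err = errs,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *
 * A -ENOENT return means some frames were paged out; the per-frame codes
 * land in errs[] and the call can be repeated for just those holes.
 */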
 587
 588static int lock_pages(
 589	struct privcmd_dm_op_buf kbufs[], unsigned int num,
 590	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
 591{
 592	unsigned int i, off = 0;
 593
 594	for (i = 0; i < num; ) {
 595		unsigned int requested;
 596		int page_count;
 597
 598		requested = DIV_ROUND_UP(
 599			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
 600			PAGE_SIZE) - off;
 601		if (requested > nr_pages)
 602			return -ENOSPC;
 603
 604		page_count = pin_user_pages_fast(
 605			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
 606			requested, FOLL_WRITE, pages);
 607		if (page_count <= 0)
 608			return page_count ? : -EFAULT;
 609
 610		*pinned += page_count;
 611		nr_pages -= page_count;
 612		pages += page_count;
 613
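		/*
		 * A short pin can stop mid-buffer: stay on kbufs[i] and
		 * remember how many of its pages are already pinned (off);
		 * advance to the next buffer only once it is fully pinned.
		 */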
 614		off = (requested == page_count) ? 0 : off + page_count;
 615		i += !off;
 616	}
 617
 618	return 0;
 619}
 620
 621static void unlock_pages(struct page *pages[], unsigned int nr_pages)
 622{
 623	unpin_user_pages_dirty_lock(pages, nr_pages, true);
 624}
 625
 626static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
 627{
 628	struct privcmd_data *data = file->private_data;
 629	struct privcmd_dm_op kdata;
 630	struct privcmd_dm_op_buf *kbufs;
 631	unsigned int nr_pages = 0;
 632	struct page **pages = NULL;
 633	struct xen_dm_op_buf *xbufs = NULL;
 634	unsigned int i;
 635	long rc;
 636	unsigned int pinned = 0;
 637
 638	if (copy_from_user(&kdata, udata, sizeof(kdata)))
 639		return -EFAULT;
 640
 641	/* If restriction is in place, check the domid matches */
 642	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
 643		return -EPERM;
 644
 645	if (kdata.num == 0)
 646		return 0;
 647
 648	if (kdata.num > privcmd_dm_op_max_num)
 649		return -E2BIG;
 650
 651	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
 652	if (!kbufs)
 653		return -ENOMEM;
 654
 655	if (copy_from_user(kbufs, kdata.ubufs,
 656			   sizeof(*kbufs) * kdata.num)) {
 657		rc = -EFAULT;
 658		goto out;
 659	}
 660
 661	for (i = 0; i < kdata.num; i++) {
 662		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
 663			rc = -E2BIG;
 664			goto out;
 665		}
 666
 667		if (!access_ok(kbufs[i].uptr,
 668			       kbufs[i].size)) {
 669			rc = -EFAULT;
 670			goto out;
 671		}
 672
 673		nr_pages += DIV_ROUND_UP(
 674			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
 675			PAGE_SIZE);
 676	}
 677
 678	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
 679	if (!pages) {
 680		rc = -ENOMEM;
 681		goto out;
 682	}
 683
 684	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
 685	if (!xbufs) {
 686		rc = -ENOMEM;
 687		goto out;
 688	}
 689
 690	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
 691	if (rc < 0)
 692		goto out;
 693
 694	for (i = 0; i < kdata.num; i++) {
 695		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
 696		xbufs[i].size = kbufs[i].size;
 697	}
 698
 699	xen_preemptible_hcall_begin();
 700	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
 701	xen_preemptible_hcall_end();
 702
 703out:
 704	unlock_pages(pages, pinned);
 705	kfree(xbufs);
 706	kfree(pages);
 707	kfree(kbufs);
 708
 709	return rc;
 710}
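/*
 * A hedged caller sketch (assumed names: fd, domid, op; the dm_op payload
 * layout follows xen/interface/hvm/dm_op.h):
 *
 *	struct privcmd_dm_op_buf buf = { .uptr = &op, .size = sizeof(op) };
 *	struct privcmd_dm_op kdata = { .dom = domid, .num = 1, .ubufs = &buf };
 *	ioctl(fd, IOCTL_PRIVCMD_DM_OP, &kdata);
 *
 * kdata.num is capped by dm_op_max_nr_bufs and each buffer by
 * dm_op_buf_max_size, the module parameters defined at the top of this
 * file.
 */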
 711
 712static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
 713{
 714	struct privcmd_data *data = file->private_data;
 715	domid_t dom;
 716
 717	if (copy_from_user(&dom, udata, sizeof(dom)))
 718		return -EFAULT;
 719
 720	/* Set restriction to the specified domain, or check it matches */
 721	if (data->domid == DOMID_INVALID)
 722		data->domid = dom;
 723	else if (data->domid != dom)
 724		return -EINVAL;
 725
 726	return 0;
 727}
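/*
 * Restriction is one-way for the lifetime of the fd. A toolstack process
 * typically drops privilege per guest like this (assumed names: fd, guest):
 *
 *	domid_t dom = guest;
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &dom);
 *
 * Afterwards raw hypercalls return -EPERM and the mapping ioctls accept
 * only this domid.
 */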
 728
 729static long privcmd_ioctl_mmap_resource(struct file *file,
 730				struct privcmd_mmap_resource __user *udata)
 731{
 732	struct privcmd_data *data = file->private_data;
 733	struct mm_struct *mm = current->mm;
 734	struct vm_area_struct *vma;
 735	struct privcmd_mmap_resource kdata;
 736	xen_pfn_t *pfns = NULL;
 737	struct xen_mem_acquire_resource xdata = { };
 738	int rc;
 739
 740	if (copy_from_user(&kdata, udata, sizeof(kdata)))
 741		return -EFAULT;
 742
 743	/* If restriction is in place, check the domid matches */
 744	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
 745		return -EPERM;
 746
 747	/* Both fields must be set or unset */
 748	if (!!kdata.addr != !!kdata.num)
 749		return -EINVAL;
 750
 751	xdata.domid = kdata.dom;
 752	xdata.type = kdata.type;
 753	xdata.id = kdata.id;
 754
 755	if (!kdata.addr && !kdata.num) {
 756		/* Query the size of the resource. */
 757		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
 758		if (rc)
 759			return rc;
 760		return __put_user(xdata.nr_frames, &udata->num);
 761	}
 762
 763	mmap_write_lock(mm);
 764
 765	vma = find_vma(mm, kdata.addr);
 766	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
 767		rc = -EINVAL;
 768		goto out;
 769	}
 770
 771	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
 772	if (!pfns) {
 773		rc = -ENOMEM;
 774		goto out;
 775	}
 776
 777	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
 778	    xen_feature(XENFEAT_auto_translated_physmap)) {
 779		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
 780		struct page **pages;
 781		unsigned int i;
 782
 783		rc = alloc_empty_pages(vma, nr);
 784		if (rc < 0)
 785			goto out;
 786
 787		pages = vma->vm_private_data;
 788
 789		for (i = 0; i < kdata.num; i++) {
 790			xen_pfn_t pfn =
 791				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
 792
 793			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
 794		}
 795	} else
 796		vma->vm_private_data = PRIV_VMA_LOCKED;
 797
 798	xdata.frame = kdata.idx;
 799	xdata.nr_frames = kdata.num;
 800	set_xen_guest_handle(xdata.frame_list, pfns);
 801
 802	xen_preemptible_hcall_begin();
 803	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
 804	xen_preemptible_hcall_end();
 805
 806	if (rc)
 807		goto out;
 808
 809	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
 810	    xen_feature(XENFEAT_auto_translated_physmap)) {
 811		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
 812	} else {
 813		unsigned int domid =
 814			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
 815			DOMID_SELF : kdata.dom;
 816		int num, *errs = (int *)pfns;
 817
 818		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
 819		num = xen_remap_domain_mfn_array(vma,
 820						 kdata.addr & PAGE_MASK,
 821						 pfns, kdata.num, errs,
 822						 vma->vm_page_prot,
 823						 domid);
 824		if (num < 0)
 825			rc = num;
 826		else if (num != kdata.num) {
 827			unsigned int i;
 828
 829			for (i = 0; i < num; i++) {
 830				rc = errs[i];
 831				if (rc < 0)
 832					break;
 833			}
 834		} else
 835			rc = 0;
 836	}
 837
 838out:
 839	mmap_write_unlock(mm);
 840	kfree(pfns);
 841
 842	return rc;
 843}
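/*
 * The resource ioctl is typically driven in two steps (a sketch; type and
 * id values are assumptions taken from xen/interface/memory.h). First a
 * query with addr == num == 0, which writes the frame count back into
 * r.num, then the real mapping against an mmap()ed range:
 *
 *	struct privcmd_mmap_resource r = { .dom = domid, .type = type, .id = id };
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r);
 *	r.addr = (__u64)mmap(NULL, r.num << PAGE_SHIFT, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, 0);
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r);
 */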
 844
 845#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
 846/* Irqfd support */
 847static struct workqueue_struct *irqfd_cleanup_wq;
 848static DEFINE_MUTEX(irqfds_lock);
 849static LIST_HEAD(irqfds_list);
 850
 851struct privcmd_kernel_irqfd {
 852	struct xen_dm_op_buf xbufs;
 853	domid_t dom;
 854	bool error;
 855	struct eventfd_ctx *eventfd;
 856	struct work_struct shutdown;
 857	wait_queue_entry_t wait;
 858	struct list_head list;
 859	poll_table pt;
 860};
 861
 862static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
 863{
 864	lockdep_assert_held(&irqfds_lock);
 865
 866	list_del_init(&kirqfd->list);
 867	queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
 868}
 869
 870static void irqfd_shutdown(struct work_struct *work)
 871{
 872	struct privcmd_kernel_irqfd *kirqfd =
 873		container_of(work, struct privcmd_kernel_irqfd, shutdown);
 874	u64 cnt;
 875
 876	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
 877	eventfd_ctx_put(kirqfd->eventfd);
 878	kfree(kirqfd);
 879}
 880
 881static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
 882{
 883	u64 cnt;
 884	long rc;
 885
 886	eventfd_ctx_do_read(kirqfd->eventfd, &cnt);
 887
 888	xen_preemptible_hcall_begin();
 889	rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
 890	xen_preemptible_hcall_end();
 891
 892	/* Don't repeat the error message for consecutive failures */
 893	if (rc && !kirqfd->error) {
 894		pr_err("Failed to configure irq for guest domain: %d\n",
 895		       kirqfd->dom);
 896	}
 897
 898	kirqfd->error = rc;
 899}
 900
 901static int
 902irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 903{
 904	struct privcmd_kernel_irqfd *kirqfd =
 905		container_of(wait, struct privcmd_kernel_irqfd, wait);
 906	__poll_t flags = key_to_poll(key);
 907
 908	if (flags & EPOLLIN)
 909		irqfd_inject(kirqfd);
 910
 911	if (flags & EPOLLHUP) {
 912		mutex_lock(&irqfds_lock);
 913		irqfd_deactivate(kirqfd);
 914		mutex_unlock(&irqfds_lock);
 915	}
 916
 917	return 0;
 918}
 919
 920static void
 921irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
 922{
 923	struct privcmd_kernel_irqfd *kirqfd =
 924		container_of(pt, struct privcmd_kernel_irqfd, pt);
 925
 926	add_wait_queue_priority(wqh, &kirqfd->wait);
 927}
 928
 929static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 930{
 931	struct privcmd_kernel_irqfd *kirqfd, *tmp;
 932	__poll_t events;
 933	struct fd f;
 934	void *dm_op;
 935	int ret;
 936
 937	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
 938	if (!kirqfd)
 939		return -ENOMEM;
 940	dm_op = kirqfd + 1;
 941
 942	if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
 943		ret = -EFAULT;
 944		goto error_kfree;
 945	}
 946
 947	kirqfd->xbufs.size = irqfd->size;
 948	set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
 949	kirqfd->dom = irqfd->dom;
 950	INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);
 951
 952	f = fdget(irqfd->fd);
 953	if (!f.file) {
 954		ret = -EBADF;
 955		goto error_kfree;
 956	}
 957
 958	kirqfd->eventfd = eventfd_ctx_fileget(f.file);
 959	if (IS_ERR(kirqfd->eventfd)) {
 960		ret = PTR_ERR(kirqfd->eventfd);
 961		goto error_fd_put;
 962	}
 963
 964	/*
 965	 * Install our own custom wake-up handling so we are notified via a
 966	 * callback whenever someone signals the underlying eventfd.
 967	 */
 968	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
 969	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
 970
 971	mutex_lock(&irqfds_lock);
 972
 973	list_for_each_entry(tmp, &irqfds_list, list) {
 974		if (kirqfd->eventfd == tmp->eventfd) {
 975			ret = -EBUSY;
 976			mutex_unlock(&irqfds_lock);
 977			goto error_eventfd;
 978		}
 979	}
 980
 981	list_add_tail(&kirqfd->list, &irqfds_list);
 982	mutex_unlock(&irqfds_lock);
 983
 984	/*
 985	 * Check if there was an event already pending on the eventfd before we
 986	 * registered, and trigger it as if we didn't miss it.
 987	 */
 988	events = vfs_poll(f.file, &kirqfd->pt);
 989	if (events & EPOLLIN)
 990		irqfd_inject(kirqfd);
 991
 992	/*
 993	 * Do not drop the file until the kirqfd is fully initialized, otherwise
 994	 * we might race against the EPOLLHUP.
 995	 */
 996	fdput(f);
 997	return 0;
 998
 999error_eventfd:
1000	eventfd_ctx_put(kirqfd->eventfd);
1001
1002error_fd_put:
1003	fdput(f);
1004
1005error_kfree:
1006	kfree(kirqfd);
1007	return ret;
1008}
1009
1010static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
1011{
1012	struct privcmd_kernel_irqfd *kirqfd;
1013	struct eventfd_ctx *eventfd;
1014
1015	eventfd = eventfd_ctx_fdget(irqfd->fd);
1016	if (IS_ERR(eventfd))
1017		return PTR_ERR(eventfd);
1018
1019	mutex_lock(&irqfds_lock);
1020
1021	list_for_each_entry(kirqfd, &irqfds_list, list) {
1022		if (kirqfd->eventfd == eventfd) {
1023			irqfd_deactivate(kirqfd);
1024			break;
1025		}
1026	}
1027
1028	mutex_unlock(&irqfds_lock);
1029
1030	eventfd_ctx_put(eventfd);
1031
1032	/*
1033	 * Block until we know all outstanding shutdown jobs have completed so
1034	 * that we guarantee there will not be any more interrupts once this
1035	 * deassign function returns.
1036	 */
1037	flush_workqueue(irqfd_cleanup_wq);
1038
1039	return 0;
1040}
1041
1042static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
1043{
1044	struct privcmd_data *data = file->private_data;
1045	struct privcmd_irqfd irqfd;
1046
1047	if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
1048		return -EFAULT;
1049
1050	/* No other flags should be set */
1051	if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
1052		return -EINVAL;
1053
1054	/* If restriction is in place, check the domid matches */
1055	if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
1056		return -EPERM;
1057
1058	if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
1059		return privcmd_irqfd_deassign(&irqfd);
1060
1061	return privcmd_irqfd_assign(&irqfd);
1062}
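/*
 * A hedged assign sketch (assumed names: fd, domid, dm_op_buf/dm_op_size;
 * the dm_op buffer encodes the interrupt to inject):
 *
 *	struct privcmd_irqfd irqfd = {
 *		.dm_op = (__u64)dm_op_buf, .size = dm_op_size,
 *		.fd = eventfd(0, 0), .dom = domid,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
 *
 * Each write to the eventfd then makes irqfd_inject() replay the stored
 * dm_op on the guest's behalf.
 */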
1063
1064static int privcmd_irqfd_init(void)
1065{
1066	irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
1067	if (!irqfd_cleanup_wq)
1068		return -ENOMEM;
1069
1070	return 0;
1071}
1072
1073static void privcmd_irqfd_exit(void)
1074{
1075	struct privcmd_kernel_irqfd *kirqfd, *tmp;
1076
1077	mutex_lock(&irqfds_lock);
1078
1079	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
1080		irqfd_deactivate(kirqfd);
1081
1082	mutex_unlock(&irqfds_lock);
1083
1084	destroy_workqueue(irqfd_cleanup_wq);
1085}
1086
1087/* Ioeventfd Support */
1088#define QUEUE_NOTIFY_VQ_MASK 0xFFFF
1089
1090static DEFINE_MUTEX(ioreq_lock);
1091static LIST_HEAD(ioreq_list);
1092
1093/* per-eventfd structure */
1094struct privcmd_kernel_ioeventfd {
1095	struct eventfd_ctx *eventfd;
1096	struct list_head list;
1097	u64 addr;
1098	unsigned int addr_len;
1099	unsigned int vq;
1100};
1101
1102/* per-guest CPU / port structure */
1103struct ioreq_port {
1104	int vcpu;
1105	unsigned int port;
1106	struct privcmd_kernel_ioreq *kioreq;
1107};
1108
1109/* per-guest structure */
1110struct privcmd_kernel_ioreq {
1111	domid_t dom;
1112	unsigned int vcpus;
1113	u64 uioreq;
1114	struct ioreq *ioreq;
1115	spinlock_t lock; /* Protects ioeventfds list */
1116	struct list_head ioeventfds;
1117	struct list_head list;
1118	struct ioreq_port ports[] __counted_by(vcpus);
1119};
1120
1121static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
1122{
1123	struct ioreq_port *port = dev_id;
1124	struct privcmd_kernel_ioreq *kioreq = port->kioreq;
1125	struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
1126	struct privcmd_kernel_ioeventfd *kioeventfd;
1127	unsigned int state = STATE_IOREQ_READY;
1128
1129	if (ioreq->state != STATE_IOREQ_READY ||
1130	    ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
1131		return IRQ_NONE;
1132
1133	/*
1134	 * We need a barrier, smp_mb(), here to ensure reads are finished before
 1135	 * `state` is updated. Since the lock implementation ensures that an
 1136	 * appropriate barrier will be added anyway, we can avoid adding an
 1137	 * explicit barrier here.
1138	 *
1139	 * Ideally we don't need to update `state` within the locks, but we do
1140	 * that here to avoid adding explicit barrier.
1141	 */
1142
1143	spin_lock(&kioreq->lock);
1144	ioreq->state = STATE_IOREQ_INPROCESS;
1145
1146	list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
1147		if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
1148		    ioreq->size == kioeventfd->addr_len &&
1149		    (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
1150			eventfd_signal(kioeventfd->eventfd);
1151			state = STATE_IORESP_READY;
1152			break;
1153		}
1154	}
1155	spin_unlock(&kioreq->lock);
1156
1157	/*
1158	 * We need a barrier, smp_mb(), here to ensure writes are finished
 1159	 * before `state` is updated. Since the lock implementation ensures
 1160	 * that an appropriate barrier will be added anyway, we can avoid
 1161	 * adding an explicit barrier here.
1162	 */
1163
1164	ioreq->state = state;
1165
1166	if (state == STATE_IORESP_READY) {
1167		notify_remote_via_evtchn(port->port);
1168		return IRQ_HANDLED;
1169	}
1170
1171	return IRQ_NONE;
1172}
1173
1174static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
1175{
1176	struct ioreq_port *ports = kioreq->ports;
1177	int i;
1178
1179	lockdep_assert_held(&ioreq_lock);
1180
1181	list_del(&kioreq->list);
1182
1183	for (i = kioreq->vcpus - 1; i >= 0; i--)
1184		unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);
1185
1186	kfree(kioreq);
1187}
1188
1189static
1190struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
1191{
1192	struct privcmd_kernel_ioreq *kioreq;
1193	struct mm_struct *mm = current->mm;
1194	struct vm_area_struct *vma;
1195	struct page **pages;
1196	unsigned int *ports;
1197	int ret, size, i;
1198
1199	lockdep_assert_held(&ioreq_lock);
1200
1201	size = struct_size(kioreq, ports, ioeventfd->vcpus);
1202	kioreq = kzalloc(size, GFP_KERNEL);
1203	if (!kioreq)
1204		return ERR_PTR(-ENOMEM);
1205
1206	kioreq->dom = ioeventfd->dom;
1207	kioreq->vcpus = ioeventfd->vcpus;
1208	kioreq->uioreq = ioeventfd->ioreq;
1209	spin_lock_init(&kioreq->lock);
1210	INIT_LIST_HEAD(&kioreq->ioeventfds);
1211
1212	/* The memory for ioreq server must have been mapped earlier */
1213	mmap_write_lock(mm);
1214	vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
1215	if (!vma) {
1216		pr_err("Failed to find vma for ioreq page!\n");
1217		mmap_write_unlock(mm);
1218		ret = -EFAULT;
1219		goto error_kfree;
1220	}
1221
1222	pages = vma->vm_private_data;
1223	kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
1224	mmap_write_unlock(mm);
1225
1226	ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
1227				  kioreq->vcpus, sizeof(*ports));
1228	if (IS_ERR(ports)) {
1229		ret = PTR_ERR(ports);
1230		goto error_kfree;
1231	}
1232
1233	for (i = 0; i < kioreq->vcpus; i++) {
1234		kioreq->ports[i].vcpu = i;
1235		kioreq->ports[i].port = ports[i];
1236		kioreq->ports[i].kioreq = kioreq;
1237
1238		ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
1239				ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
1240				&kioreq->ports[i]);
1241		if (ret < 0)
1242			goto error_unbind;
1243	}
1244
1245	kfree(ports);
1246
1247	list_add_tail(&kioreq->list, &ioreq_list);
1248
1249	return kioreq;
1250
1251error_unbind:
1252	while (--i >= 0)
1253		unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
1254
1255	kfree(ports);
1256error_kfree:
1257	kfree(kioreq);
1258	return ERR_PTR(ret);
1259}
1260
1261static struct privcmd_kernel_ioreq *
1262get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
1263{
1264	struct privcmd_kernel_ioreq *kioreq;
1265	unsigned long flags;
1266
1267	list_for_each_entry(kioreq, &ioreq_list, list) {
1268		struct privcmd_kernel_ioeventfd *kioeventfd;
1269
1270		/*
1271		 * kioreq fields can be accessed here without a lock as they are
1272		 * never updated after being added to the ioreq_list.
1273		 */
1274		if (kioreq->uioreq != ioeventfd->ioreq) {
1275			continue;
1276		} else if (kioreq->dom != ioeventfd->dom ||
1277			   kioreq->vcpus != ioeventfd->vcpus) {
1278			pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",
1279			       kioreq->dom, ioeventfd->dom, kioreq->vcpus,
1280			       ioeventfd->vcpus);
1281			return ERR_PTR(-EINVAL);
1282		}
1283
1284		/* Look for a duplicate eventfd for the same guest */
1285		spin_lock_irqsave(&kioreq->lock, flags);
1286		list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
1287			if (eventfd == kioeventfd->eventfd) {
1288				spin_unlock_irqrestore(&kioreq->lock, flags);
1289				return ERR_PTR(-EBUSY);
1290			}
1291		}
1292		spin_unlock_irqrestore(&kioreq->lock, flags);
1293
1294		return kioreq;
1295	}
1296
1297	/* Matching kioreq isn't found, allocate a new one */
1298	return alloc_ioreq(ioeventfd);
1299}
1300
1301static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
1302{
1303	list_del(&kioeventfd->list);
1304	eventfd_ctx_put(kioeventfd->eventfd);
1305	kfree(kioeventfd);
1306}
1307
1308static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
1309{
1310	struct privcmd_kernel_ioeventfd *kioeventfd;
1311	struct privcmd_kernel_ioreq *kioreq;
1312	unsigned long flags;
1313	struct fd f;
1314	int ret;
1315
1316	/* Check for range overflow */
1317	if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
1318		return -EINVAL;
1319
1320	/* Vhost requires us to support length 1, 2, 4, and 8 */
1321	if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
1322	      ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
1323		return -EINVAL;
1324
 1325	/* Is a limit of 4096 vcpus enough? */
1326	if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
1327		return -EINVAL;
1328
1329	kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
1330	if (!kioeventfd)
1331		return -ENOMEM;
1332
1333	f = fdget(ioeventfd->event_fd);
1334	if (!f.file) {
1335		ret = -EBADF;
1336		goto error_kfree;
1337	}
1338
1339	kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
1340	fdput(f);
1341
1342	if (IS_ERR(kioeventfd->eventfd)) {
1343		ret = PTR_ERR(kioeventfd->eventfd);
1344		goto error_kfree;
1345	}
1346
1347	kioeventfd->addr = ioeventfd->addr;
1348	kioeventfd->addr_len = ioeventfd->addr_len;
1349	kioeventfd->vq = ioeventfd->vq;
1350
1351	mutex_lock(&ioreq_lock);
1352	kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
1353	if (IS_ERR(kioreq)) {
1354		mutex_unlock(&ioreq_lock);
1355		ret = PTR_ERR(kioreq);
1356		goto error_eventfd;
1357	}
1358
1359	spin_lock_irqsave(&kioreq->lock, flags);
1360	list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
1361	spin_unlock_irqrestore(&kioreq->lock, flags);
1362
1363	mutex_unlock(&ioreq_lock);
1364
1365	return 0;
1366
1367error_eventfd:
1368	eventfd_ctx_put(kioeventfd->eventfd);
1369
1370error_kfree:
1371	kfree(kioeventfd);
1372	return ret;
1373}
1374
1375static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
1376{
1377	struct privcmd_kernel_ioreq *kioreq, *tkioreq;
1378	struct eventfd_ctx *eventfd;
1379	unsigned long flags;
1380	int ret = 0;
1381
1382	eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
1383	if (IS_ERR(eventfd))
1384		return PTR_ERR(eventfd);
1385
1386	mutex_lock(&ioreq_lock);
1387	list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
1388		struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
1389		/*
1390		 * kioreq fields can be accessed here without a lock as they are
1391		 * never updated after being added to the ioreq_list.
1392		 */
1393		if (kioreq->dom != ioeventfd->dom ||
1394		    kioreq->uioreq != ioeventfd->ioreq ||
1395		    kioreq->vcpus != ioeventfd->vcpus)
1396			continue;
1397
1398		spin_lock_irqsave(&kioreq->lock, flags);
1399		list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
1400			if (eventfd == kioeventfd->eventfd) {
1401				ioeventfd_free(kioeventfd);
1402				spin_unlock_irqrestore(&kioreq->lock, flags);
1403
1404				if (list_empty(&kioreq->ioeventfds))
1405					ioreq_free(kioreq);
1406				goto unlock;
1407			}
1408		}
1409		spin_unlock_irqrestore(&kioreq->lock, flags);
1410		break;
1411	}
1412
1413	pr_err("Ioeventfd isn't already assigned, dom: %u, addr: %llu\n",
1414	       ioeventfd->dom, ioeventfd->addr);
1415	ret = -ENODEV;
1416
1417unlock:
1418	mutex_unlock(&ioreq_lock);
1419	eventfd_ctx_put(eventfd);
1420
1421	return ret;
1422}
1423
1424static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
1425{
1426	struct privcmd_data *data = file->private_data;
1427	struct privcmd_ioeventfd ioeventfd;
1428
1429	if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
1430		return -EFAULT;
1431
1432	/* No other flags should be set */
1433	if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
1434		return -EINVAL;
1435
1436	/* If restriction is in place, check the domid matches */
1437	if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
1438		return -EPERM;
1439
1440	if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
1441		return privcmd_ioeventfd_deassign(&ioeventfd);
1442
1443	return privcmd_ioeventfd_assign(&ioeventfd);
1444}
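/*
 * A hedged assign sketch for a virtio-MMIO doorbell (assumed names: fd,
 * domid, ioreq_base, ports, nr_vcpus, mmio_base, vq_index). addr is the
 * MMIO base of the device; the interrupt handler above adds
 * VIRTIO_MMIO_QUEUE_NOTIFY itself. ioreq must point into an ioreq-server
 * page previously mapped through this driver:
 *
 *	struct privcmd_ioeventfd ioeventfd = {
 *		.ioreq = (__u64)ioreq_base, .ports = (__u64)ports,
 *		.vcpus = nr_vcpus, .dom = domid,
 *		.addr = mmio_base, .addr_len = 4, .vq = vq_index,
 *		.event_fd = eventfd(0, 0),
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_IOEVENTFD, &ioeventfd);
 */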
1445
1446static void privcmd_ioeventfd_exit(void)
1447{
1448	struct privcmd_kernel_ioreq *kioreq, *tmp;
1449	unsigned long flags;
1450
1451	mutex_lock(&ioreq_lock);
1452	list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
1453		struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
1454
1455		spin_lock_irqsave(&kioreq->lock, flags);
1456		list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
1457			ioeventfd_free(kioeventfd);
1458		spin_unlock_irqrestore(&kioreq->lock, flags);
1459
1460		ioreq_free(kioreq);
1461	}
1462	mutex_unlock(&ioreq_lock);
1463}
1464#else
1465static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
1466{
1467	return -EOPNOTSUPP;
1468}
1469
1470static inline int privcmd_irqfd_init(void)
1471{
1472	return 0;
1473}
1474
1475static inline void privcmd_irqfd_exit(void)
1476{
1477}
1478
1479static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
1480{
1481	return -EOPNOTSUPP;
1482}
1483
1484static inline void privcmd_ioeventfd_exit(void)
1485{
1486}
1487#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */
1488
1489static long privcmd_ioctl(struct file *file,
1490			  unsigned int cmd, unsigned long data)
1491{
1492	int ret = -ENOTTY;
1493	void __user *udata = (void __user *) data;
1494
1495	switch (cmd) {
1496	case IOCTL_PRIVCMD_HYPERCALL:
1497		ret = privcmd_ioctl_hypercall(file, udata);
1498		break;
1499
1500	case IOCTL_PRIVCMD_MMAP:
1501		ret = privcmd_ioctl_mmap(file, udata);
1502		break;
1503
1504	case IOCTL_PRIVCMD_MMAPBATCH:
1505		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
1506		break;
1507
1508	case IOCTL_PRIVCMD_MMAPBATCH_V2:
1509		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
1510		break;
1511
1512	case IOCTL_PRIVCMD_DM_OP:
1513		ret = privcmd_ioctl_dm_op(file, udata);
1514		break;
1515
1516	case IOCTL_PRIVCMD_RESTRICT:
1517		ret = privcmd_ioctl_restrict(file, udata);
1518		break;
1519
1520	case IOCTL_PRIVCMD_MMAP_RESOURCE:
1521		ret = privcmd_ioctl_mmap_resource(file, udata);
1522		break;
1523
1524	case IOCTL_PRIVCMD_IRQFD:
1525		ret = privcmd_ioctl_irqfd(file, udata);
1526		break;
1527
1528	case IOCTL_PRIVCMD_IOEVENTFD:
1529		ret = privcmd_ioctl_ioeventfd(file, udata);
1530		break;
1531
1532	default:
1533		break;
1534	}
1535
1536	return ret;
1537}
1538
1539static int privcmd_open(struct inode *ino, struct file *file)
1540{
1541	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
1542
1543	if (!data)
1544		return -ENOMEM;
1545
1546	/* DOMID_INVALID implies no restriction */
1547	data->domid = DOMID_INVALID;
1548
1549	file->private_data = data;
1550	return 0;
1551}
1552
1553static int privcmd_release(struct inode *ino, struct file *file)
1554{
1555	struct privcmd_data *data = file->private_data;
1556
1557	kfree(data);
1558	return 0;
1559}
1560
1561static void privcmd_close(struct vm_area_struct *vma)
1562{
1563	struct page **pages = vma->vm_private_data;
1564	int numpgs = vma_pages(vma);
1565	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
1566	int rc;
1567
1568	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
1569		return;
1570
1571	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
1572	if (rc == 0)
1573		xen_free_unpopulated_pages(numpgs, pages);
1574	else
1575		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
1576			numpgs, rc);
1577	kvfree(pages);
1578}
1579
1580static vm_fault_t privcmd_fault(struct vm_fault *vmf)
1581{
1582	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
1583	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
1584	       vmf->pgoff, (void *)vmf->address);
1585
1586	return VM_FAULT_SIGBUS;
1587}
1588
1589static const struct vm_operations_struct privcmd_vm_ops = {
1590	.close = privcmd_close,
1591	.fault = privcmd_fault
1592};
1593
1594static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
1595{
1596	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
1597	 * how to recreate these mappings */
1598	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
1599			 VM_DONTEXPAND | VM_DONTDUMP);
1600	vma->vm_ops = &privcmd_vm_ops;
1601	vma->vm_private_data = NULL;
1602
1603	return 0;
1604}
1605
1606/*
1607 * For MMAPBATCH*. This allows asserting the singleshot mapping
1608 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 1609 * can then be retried until success.
1610 */
1611static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
1612{
1613	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
1614}
1615
1616static int privcmd_vma_range_is_mapped(
1617	           struct vm_area_struct *vma,
1618	           unsigned long addr,
1619	           unsigned long nr_pages)
1620{
1621	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
1622				   is_mapped_fn, NULL) != 0;
1623}
1624
1625const struct file_operations xen_privcmd_fops = {
1626	.owner = THIS_MODULE,
1627	.unlocked_ioctl = privcmd_ioctl,
1628	.open = privcmd_open,
1629	.release = privcmd_release,
1630	.mmap = privcmd_mmap,
1631};
1632EXPORT_SYMBOL_GPL(xen_privcmd_fops);
1633
1634static struct miscdevice privcmd_dev = {
1635	.minor = MISC_DYNAMIC_MINOR,
1636	.name = "xen/privcmd",
1637	.fops = &xen_privcmd_fops,
1638};
1639
1640static int __init privcmd_init(void)
1641{
1642	int err;
1643
1644	if (!xen_domain())
1645		return -ENODEV;
1646
1647	err = misc_register(&privcmd_dev);
1648	if (err != 0) {
1649		pr_err("Could not register Xen privcmd device\n");
1650		return err;
1651	}
1652
1653	err = misc_register(&xen_privcmdbuf_dev);
1654	if (err != 0) {
1655		pr_err("Could not register Xen hypercall-buf device\n");
1656		goto err_privcmdbuf;
1657	}
1658
1659	err = privcmd_irqfd_init();
1660	if (err != 0) {
1661		pr_err("irqfd init failed\n");
1662		goto err_irqfd;
1663	}
1664
1665	return 0;
1666
1667err_irqfd:
1668	misc_deregister(&xen_privcmdbuf_dev);
1669err_privcmdbuf:
1670	misc_deregister(&privcmd_dev);
1671	return err;
1672}
1673
1674static void __exit privcmd_exit(void)
1675{
1676	privcmd_ioeventfd_exit();
1677	privcmd_irqfd_exit();
1678	misc_deregister(&privcmd_dev);
1679	misc_deregister(&xen_privcmdbuf_dev);
1680}
1681
1682module_init(privcmd_init);
1683module_exit(privcmd_exit);
v3.5.6
 
  1/******************************************************************************
  2 * privcmd.c
  3 *
  4 * Interface to privileged domain-0 commands.
  5 *
  6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/module.h>
 11#include <linux/sched.h>
 12#include <linux/slab.h>
 13#include <linux/string.h>
 14#include <linux/errno.h>
 15#include <linux/mm.h>
 16#include <linux/mman.h>
 17#include <linux/uaccess.h>
 18#include <linux/swap.h>
 19#include <linux/highmem.h>
 20#include <linux/pagemap.h>
 21#include <linux/seq_file.h>
 22#include <linux/miscdevice.h>
 23
 24#include <asm/pgalloc.h>
 25#include <asm/pgtable.h>
 26#include <asm/tlb.h>
 27#include <asm/xen/hypervisor.h>
 28#include <asm/xen/hypercall.h>
 29
 30#include <xen/xen.h>
 31#include <xen/privcmd.h>
 32#include <xen/interface/xen.h>
 33#include <xen/features.h>
 34#include <xen/page.h>
 35#include <xen/xen-ops.h>
 36
 37#include "privcmd.h"
 38
 39MODULE_LICENSE("GPL");
 40
 41#ifndef HAVE_ARCH_PRIVCMD_MMAP
 42static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 43#endif
 44
 45static long privcmd_ioctl_hypercall(void __user *udata)
 46{
 47	struct privcmd_hypercall hypercall;
 48	long ret;
 49
 50	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
 51		return -EFAULT;
 52
 53	ret = privcmd_call(hypercall.op,
 54			   hypercall.arg[0], hypercall.arg[1],
 55			   hypercall.arg[2], hypercall.arg[3],
 56			   hypercall.arg[4]);
 57
 58	return ret;
 59}
 60
 61static void free_page_list(struct list_head *pages)
 62{
 63	struct page *p, *n;
 64
 65	list_for_each_entry_safe(p, n, pages, lru)
 66		__free_page(p);
 67
 68	INIT_LIST_HEAD(pages);
 69}
 70
 71/*
 72 * Given an array of items in userspace, return a list of pages
 73 * containing the data.  If copying fails, either because of memory
 74 * allocation failure or a problem reading user memory, return an
  75 * error code; it's up to the caller to dispose of any partial list.
 76 */
 77static int gather_array(struct list_head *pagelist,
 78			unsigned nelem, size_t size,
 79			void __user *data)
 80{
 81	unsigned pageidx;
 82	void *pagedata;
 83	int ret;
 84
 85	if (size > PAGE_SIZE)
 86		return 0;
 87
 88	pageidx = PAGE_SIZE;
 89	pagedata = NULL;	/* quiet, gcc */
 90	while (nelem--) {
 91		if (pageidx > PAGE_SIZE-size) {
 92			struct page *page = alloc_page(GFP_KERNEL);
 93
 94			ret = -ENOMEM;
 95			if (page == NULL)
 96				goto fail;
 97
 98			pagedata = page_address(page);
 99
100			list_add_tail(&page->lru, pagelist);
101			pageidx = 0;
102		}
103
104		ret = -EFAULT;
105		if (copy_from_user(pagedata + pageidx, data, size))
106			goto fail;
107
108		data += size;
109		pageidx += size;
110	}
111
112	ret = 0;
113
114fail:
115	return ret;
116}
117
118/*
119 * Call function "fn" on each element of the array fragmented
120 * over a list of pages.
121 */
122static int traverse_pages(unsigned nelem, size_t size,
123			  struct list_head *pos,
124			  int (*fn)(void *data, void *state),
125			  void *state)
126{
127	void *pagedata;
128	unsigned pageidx;
129	int ret = 0;
130
131	BUG_ON(size > PAGE_SIZE);
132
133	pageidx = PAGE_SIZE;
134	pagedata = NULL;	/* hush, gcc */
135
136	while (nelem--) {
137		if (pageidx > PAGE_SIZE-size) {
138			struct page *page;
139			pos = pos->next;
140			page = list_entry(pos, struct page, lru);
141			pagedata = page_address(page);
142			pageidx = 0;
143		}
144
145		ret = (*fn)(pagedata + pageidx, state);
146		if (ret)
147			break;
148		pageidx += size;
149	}
150
151	return ret;
152}
153
154struct mmap_mfn_state {
155	unsigned long va;
156	struct vm_area_struct *vma;
157	domid_t domain;
158};
159
160static int mmap_mfn_range(void *data, void *state)
161{
162	struct privcmd_mmap_entry *msg = data;
163	struct mmap_mfn_state *st = state;
164	struct vm_area_struct *vma = st->vma;
165	int rc;
166
167	/* Do not allow range to wrap the address space. */
168	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
169	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
170		return -EINVAL;
171
172	/* Range chunks must be contiguous in va space. */
173	if ((msg->va != st->va) ||
174	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
175		return -EINVAL;
176
177	rc = xen_remap_domain_mfn_range(vma,
178					msg->va & PAGE_MASK,
179					msg->mfn, msg->npages,
180					vma->vm_page_prot,
181					st->domain);
182	if (rc < 0)
183		return rc;
184
185	st->va += msg->npages << PAGE_SHIFT;
186
187	return 0;
188}
189
190static long privcmd_ioctl_mmap(void __user *udata)
191{
192	struct privcmd_mmap mmapcmd;
193	struct mm_struct *mm = current->mm;
194	struct vm_area_struct *vma;
195	int rc;
196	LIST_HEAD(pagelist);
197	struct mmap_mfn_state state;
198
199	if (!xen_initial_domain())
200		return -EPERM;
201
202	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
203		return -EFAULT;
204
205	rc = gather_array(&pagelist,
206			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
207			  mmapcmd.entry);
208
209	if (rc || list_empty(&pagelist))
210		goto out;
211
212	down_write(&mm->mmap_sem);
213
214	{
215		struct page *page = list_first_entry(&pagelist,
216						     struct page, lru);
217		struct privcmd_mmap_entry *msg = page_address(page);
218
219		vma = find_vma(mm, msg->va);
220		rc = -EINVAL;
221
222		if (!vma || (msg->va != vma->vm_start) ||
223		    !privcmd_enforce_singleshot_mapping(vma))
224			goto out_up;
225	}
226
227	state.va = vma->vm_start;
228	state.vma = vma;
229	state.domain = mmapcmd.dom;
230
231	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
232			    &pagelist,
233			    mmap_mfn_range, &state);
234
235
236out_up:
237	up_write(&mm->mmap_sem);
238
239out:
240	free_page_list(&pagelist);
241
242	return rc;
243}
244
245struct mmap_batch_state {
246	domid_t domain;
247	unsigned long va;
248	struct vm_area_struct *vma;
249	int err;
250
251	xen_pfn_t __user *user;
252};
253
254static int mmap_batch_fn(void *data, void *state)
255{
256	xen_pfn_t *mfnp = data;
257	struct mmap_batch_state *st = state;
258
259	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
260				       st->vma->vm_page_prot, st->domain) < 0) {
261		*mfnp |= 0xf0000000U;
262		st->err++;
263	}
264	st->va += PAGE_SIZE;
265
266	return 0;
267}
268
269static int mmap_return_errors(void *data, void *state)
270{
271	xen_pfn_t *mfnp = data;
272	struct mmap_batch_state *st = state;
273
274	return put_user(*mfnp, st->user++);
275}
276
277static struct vm_operations_struct privcmd_vm_ops;
278
279static long privcmd_ioctl_mmap_batch(void __user *udata)
280{
281	int ret;
282	struct privcmd_mmapbatch m;
283	struct mm_struct *mm = current->mm;
284	struct vm_area_struct *vma;
285	unsigned long nr_pages;
286	LIST_HEAD(pagelist);
287	struct mmap_batch_state state;
288
289	if (!xen_initial_domain())
290		return -EPERM;
291
292	if (copy_from_user(&m, udata, sizeof(m)))
293		return -EFAULT;
294
295	nr_pages = m.num;
296	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
297		return -EINVAL;
298
299	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
300			   m.arr);
301
302	if (ret || list_empty(&pagelist))
303		goto out;
304
305	down_write(&mm->mmap_sem);
306
307	vma = find_vma(mm, m.addr);
308	ret = -EINVAL;
309	if (!vma ||
310	    vma->vm_ops != &privcmd_vm_ops ||
311	    (m.addr != vma->vm_start) ||
312	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
313	    !privcmd_enforce_singleshot_mapping(vma)) {
314		up_write(&mm->mmap_sem);
315		goto out;
316	}
317
318	state.domain = m.dom;
319	state.vma = vma;
320	state.va = m.addr;
321	state.err = 0;
322
323	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
324			     &pagelist, mmap_batch_fn, &state);
325
326	up_write(&mm->mmap_sem);
327
328	if (state.err > 0) {
329		state.user = m.arr;
330		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
331			       &pagelist,
332			       mmap_return_errors, &state);
333	}
334
335out:
336	free_page_list(&pagelist);
337
338	return ret;
339}
340
341static long privcmd_ioctl(struct file *file,
342			  unsigned int cmd, unsigned long data)
343{
344	int ret = -ENOSYS;
345	void __user *udata = (void __user *) data;
346
347	switch (cmd) {
348	case IOCTL_PRIVCMD_HYPERCALL:
349		ret = privcmd_ioctl_hypercall(udata);
350		break;
351
352	case IOCTL_PRIVCMD_MMAP:
353		ret = privcmd_ioctl_mmap(udata);
354		break;
355
356	case IOCTL_PRIVCMD_MMAPBATCH:
357		ret = privcmd_ioctl_mmap_batch(udata);
358		break;
359
360	default:
361		ret = -EINVAL;
362		break;
363	}
364
365	return ret;
366}
367
368static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
369{
370	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
371	       vma, vma->vm_start, vma->vm_end,
372	       vmf->pgoff, vmf->virtual_address);
373
374	return VM_FAULT_SIGBUS;
375}
376
377static struct vm_operations_struct privcmd_vm_ops = {
378	.fault = privcmd_fault
379};
380
381static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
382{
383	/* Unsupported for auto-translate guests. */
384	if (xen_feature(XENFEAT_auto_translated_physmap))
385		return -ENOSYS;
386
387	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
388	 * how to recreate these mappings */
389	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
390	vma->vm_ops = &privcmd_vm_ops;
391	vma->vm_private_data = NULL;
392
393	return 0;
394}
395
396static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
397{
398	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
399}
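/*
 * The xchg() above atomically claims the VMA: only the first caller sees
 * NULL and wins, so a second MMAP/MMAPBATCH on the same VMA fails. The
 * v6.8 listing above keeps the same idea via the PRIV_VMA_LOCKED sentinel,
 * checked under the write-held mmap lock instead of with an atomic swap.
 */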
400
401const struct file_operations xen_privcmd_fops = {
402	.owner = THIS_MODULE,
403	.unlocked_ioctl = privcmd_ioctl,
404	.mmap = privcmd_mmap,
405};
406EXPORT_SYMBOL_GPL(xen_privcmd_fops);
407
408static struct miscdevice privcmd_dev = {
409	.minor = MISC_DYNAMIC_MINOR,
410	.name = "xen/privcmd",
411	.fops = &xen_privcmd_fops,
412};
413
414static int __init privcmd_init(void)
415{
416	int err;
417
418	if (!xen_domain())
419		return -ENODEV;
420
421	err = misc_register(&privcmd_dev);
422	if (err != 0) {
423		printk(KERN_ERR "Could not register Xen privcmd device\n");
424		return err;
425	}
426	return 0;
427}
428
429static void __exit privcmd_exit(void)
430{
431	misc_deregister(&privcmd_dev);
432}
433
434module_init(privcmd_init);
435module_exit(privcmd_exit);