v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/******************************************************************************
  3 * privcmd.c
  4 *
  5 * Interface to privileged domain-0 commands.
  6 *
  7 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
  8 */
  9
 10#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 11
 12#include <linux/kernel.h>
 13#include <linux/module.h>
 14#include <linux/sched.h>
 15#include <linux/slab.h>
 16#include <linux/string.h>
 17#include <linux/errno.h>
 18#include <linux/mm.h>
 19#include <linux/mman.h>
 20#include <linux/uaccess.h>
 21#include <linux/swap.h>
 22#include <linux/highmem.h>
 23#include <linux/pagemap.h>
 24#include <linux/seq_file.h>
 25#include <linux/miscdevice.h>
 26#include <linux/moduleparam.h>
 27
 28#include <asm/xen/hypervisor.h>
 29#include <asm/xen/hypercall.h>
 30
 31#include <xen/xen.h>
 32#include <xen/privcmd.h>
 33#include <xen/interface/xen.h>
 34#include <xen/interface/memory.h>
 35#include <xen/interface/hvm/dm_op.h>
 36#include <xen/features.h>
 37#include <xen/page.h>
 38#include <xen/xen-ops.h>
 39#include <xen/balloon.h>
 40
 41#include "privcmd.h"
 42
 43MODULE_LICENSE("GPL");
 44
 45#define PRIV_VMA_LOCKED ((void *)1)
 46
 47static unsigned int privcmd_dm_op_max_num = 16;
 48module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
 49MODULE_PARM_DESC(dm_op_max_nr_bufs,
 50		 "Maximum number of buffers per dm_op hypercall");
 51
 52static unsigned int privcmd_dm_op_buf_max_size = 4096;
 53module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
 54		   0644);
 55MODULE_PARM_DESC(dm_op_buf_max_size,
 56		 "Maximum size of a dm_op hypercall buffer");
 57
 58struct privcmd_data {
 59	domid_t domid;
 60};
 61
 62static int privcmd_vma_range_is_mapped(
 63               struct vm_area_struct *vma,
 64               unsigned long addr,
 65               unsigned long nr_pages);
 66
 67static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
 68{
 69	struct privcmd_data *data = file->private_data;
 70	struct privcmd_hypercall hypercall;
 71	long ret;
 72
 73	/* Disallow arbitrary hypercalls if restricted */
 74	if (data->domid != DOMID_INVALID)
 75		return -EPERM;
 76
 77	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
 78		return -EFAULT;
 79
 80	xen_preemptible_hcall_begin();
 81	ret = privcmd_call(hypercall.op,
 82			   hypercall.arg[0], hypercall.arg[1],
 83			   hypercall.arg[2], hypercall.arg[3],
 84			   hypercall.arg[4]);
 85	xen_preemptible_hcall_end();
 86
 87	return ret;
 88}
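
The handler above forwards five untyped arguments straight to the hypervisor, so the userspace side can be correspondingly small. A minimal sketch, assuming the exported uapi header <xen/privcmd.h> is installed and taking two numeric constants on trust from the Xen interface headers (17 for __HYPERVISOR_xen_version, 0 for XENVER_version); the ioctl's return value is the hypercall's return value:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

int main(void)
{
	/* XENVER_version returns (major << 16) | minor. */
	struct privcmd_hypercall call = {
		.op  = 17,		/* __HYPERVISOR_xen_version (assumed value) */
		.arg = { 0 },		/* XENVER_version, no argument buffer */
	};
	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
	long ret;

	if (fd < 0) {
		perror("open /dev/xen/privcmd");
		return 1;
	}
	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	if (ret < 0)
		perror("IOCTL_PRIVCMD_HYPERCALL");
	else
		printf("Xen %ld.%ld\n", ret >> 16, ret & 0xffff);
	close(fd);
	return 0;
}
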
 89
 90static void free_page_list(struct list_head *pages)
 91{
 92	struct page *p, *n;
 93
 94	list_for_each_entry_safe(p, n, pages, lru)
 95		__free_page(p);
 96
 97	INIT_LIST_HEAD(pages);
 98}
 99
100/*
101 * Given an array of items in userspace, return a list of pages
102 * containing the data.  If copying fails, either because of memory
103 * allocation failure or a problem reading user memory, return an
104 * error code; it's up to the caller to dispose of any partial list.
105 */
106static int gather_array(struct list_head *pagelist,
107			unsigned nelem, size_t size,
108			const void __user *data)
109{
110	unsigned pageidx;
111	void *pagedata;
112	int ret;
113
114	if (size > PAGE_SIZE)
115		return 0;
116
117	pageidx = PAGE_SIZE;
118	pagedata = NULL;	/* quiet, gcc */
119	while (nelem--) {
120		if (pageidx > PAGE_SIZE-size) {
121			struct page *page = alloc_page(GFP_KERNEL);
122
123			ret = -ENOMEM;
124			if (page == NULL)
125				goto fail;
126
127			pagedata = page_address(page);
128
129			list_add_tail(&page->lru, pagelist);
130			pageidx = 0;
131		}
132
133		ret = -EFAULT;
134		if (copy_from_user(pagedata + pageidx, data, size))
135			goto fail;
136
137		data += size;
138		pageidx += size;
139	}
140
141	ret = 0;
142
143fail:
144	return ret;
145}
146
147/*
148 * Call function "fn" on each element of the array fragmented
149 * over a list of pages.
150 */
151static int traverse_pages(unsigned nelem, size_t size,
152			  struct list_head *pos,
153			  int (*fn)(void *data, void *state),
154			  void *state)
155{
156	void *pagedata;
157	unsigned pageidx;
158	int ret = 0;
159
160	BUG_ON(size > PAGE_SIZE);
161
162	pageidx = PAGE_SIZE;
163	pagedata = NULL;	/* hush, gcc */
164
165	while (nelem--) {
166		if (pageidx > PAGE_SIZE-size) {
167			struct page *page;
168			pos = pos->next;
169			page = list_entry(pos, struct page, lru);
170			pagedata = page_address(page);
171			pageidx = 0;
172		}
173
174		ret = (*fn)(pagedata + pageidx, state);
175		if (ret)
176			break;
177		pageidx += size;
178	}
179
180	return ret;
181}
182
183/*
184 * Similar to traverse_pages, but use each page as a "block" of
185 * data to be processed as one unit.
186 */
187static int traverse_pages_block(unsigned nelem, size_t size,
188				struct list_head *pos,
189				int (*fn)(void *data, int nr, void *state),
190				void *state)
191{
192	void *pagedata;
193	int ret = 0;
194
195	BUG_ON(size > PAGE_SIZE);
196
197	while (nelem) {
198		int nr = (PAGE_SIZE/size);
199		struct page *page;
200		if (nr > nelem)
201			nr = nelem;
202		pos = pos->next;
203		page = list_entry(pos, struct page, lru);
204		pagedata = page_address(page);
205		ret = (*fn)(pagedata, nr, state);
206		if (ret)
207			break;
208		nelem -= nr;
209	}
210
211	return ret;
212}
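
Both traversal helpers rely on the packing rule gather_array establishes: an element is never split across a page boundary, so each page carries PAGE_SIZE/size elements (integer division) and traverse_pages_block can hand fn one whole page at a time. A standalone sketch of that arithmetic, with hypothetical sizes and a 4 KiB page assumed:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long size  = sizeof(unsigned long);	/* stand-in for xen_pfn_t */
	unsigned long nelem = 1000;			/* illustrative count */
	unsigned long per_page = PAGE_SIZE / size;	/* whole elements per page */
	unsigned long pages = (nelem + per_page - 1) / per_page;

	printf("%lu elements of %lu bytes: %lu per page, %lu page(s)\n",
	       nelem, size, per_page, pages);
	return 0;
}
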
213
214struct mmap_gfn_state {
215	unsigned long va;
216	struct vm_area_struct *vma;
217	domid_t domain;
218};
219
220static int mmap_gfn_range(void *data, void *state)
221{
222	struct privcmd_mmap_entry *msg = data;
223	struct mmap_gfn_state *st = state;
224	struct vm_area_struct *vma = st->vma;
225	int rc;
226
227	/* Do not allow range to wrap the address space. */
228	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
229	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
230		return -EINVAL;
231
232	/* Range chunks must be contiguous in va space. */
233	if ((msg->va != st->va) ||
234	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
235		return -EINVAL;
236
237	rc = xen_remap_domain_gfn_range(vma,
238					msg->va & PAGE_MASK,
239					msg->mfn, msg->npages,
240					vma->vm_page_prot,
241					st->domain, NULL);
242	if (rc < 0)
243		return rc;
244
245	st->va += msg->npages << PAGE_SHIFT;
246
247	return 0;
248}
249
250static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
251{
252	struct privcmd_data *data = file->private_data;
253	struct privcmd_mmap mmapcmd;
254	struct mm_struct *mm = current->mm;
255	struct vm_area_struct *vma;
256	int rc;
257	LIST_HEAD(pagelist);
258	struct mmap_gfn_state state;
259
260	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
261	if (xen_feature(XENFEAT_auto_translated_physmap))
262		return -ENOSYS;
263
264	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
265		return -EFAULT;
266
267	/* If restriction is in place, check the domid matches */
268	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
269		return -EPERM;
270
271	rc = gather_array(&pagelist,
272			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
273			  mmapcmd.entry);
274
275	if (rc || list_empty(&pagelist))
276		goto out;
277
278	mmap_write_lock(mm);
279
280	{
281		struct page *page = list_first_entry(&pagelist,
282						     struct page, lru);
283		struct privcmd_mmap_entry *msg = page_address(page);
284
285		vma = find_vma(mm, msg->va);
286		rc = -EINVAL;
287
288		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
289			goto out_up;
290		vma->vm_private_data = PRIV_VMA_LOCKED;
291	}
292
293	state.va = vma->vm_start;
294	state.vma = vma;
295	state.domain = mmapcmd.dom;
296
297	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
298			    &pagelist,
299			    mmap_gfn_range, &state);
300
301
302out_up:
303	mmap_write_unlock(mm);
304
305out:
306	free_page_list(&pagelist);
307
308	return rc;
309}
310
311struct mmap_batch_state {
312	domid_t domain;
313	unsigned long va;
314	struct vm_area_struct *vma;
315	int index;
316	/* A tristate:
317	 *      0 for no errors
318	 *      1 if at least one error has happened (and no
319	 *          -ENOENT errors have happened)
320	 *      -ENOENT if at least 1 -ENOENT has happened.
321	 */
322	int global_error;
323	int version;
324
325	/* User-space gfn array to store errors in the second pass for V1. */
326	xen_pfn_t __user *user_gfn;
327	/* User-space int array to store errors in the second pass for V2. */
328	int __user *user_err;
329};
330
331/* auto translated dom0 note: if domU being created is PV, then gfn is
332 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
333 */
334static int mmap_batch_fn(void *data, int nr, void *state)
335{
336	xen_pfn_t *gfnp = data;
337	struct mmap_batch_state *st = state;
338	struct vm_area_struct *vma = st->vma;
339	struct page **pages = vma->vm_private_data;
340	struct page **cur_pages = NULL;
341	int ret;
342
343	if (xen_feature(XENFEAT_auto_translated_physmap))
344		cur_pages = &pages[st->index];
345
346	BUG_ON(nr < 0);
347	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
348					 (int *)gfnp, st->vma->vm_page_prot,
349					 st->domain, cur_pages);
350
351	/* Adjust the global_error? */
352	if (ret != nr) {
353		if (ret == -ENOENT)
354			st->global_error = -ENOENT;
355		else {
356			/* Record that at least one error has happened. */
357			if (st->global_error == 0)
358				st->global_error = 1;
359		}
360	}
361	st->va += XEN_PAGE_SIZE * nr;
362	st->index += nr / XEN_PFN_PER_PAGE;
363
364	return 0;
365}
366
367static int mmap_return_error(int err, struct mmap_batch_state *st)
368{
369	int ret;
370
371	if (st->version == 1) {
372		if (err) {
373			xen_pfn_t gfn;
374
375			ret = get_user(gfn, st->user_gfn);
376			if (ret < 0)
377				return ret;
378			/*
379			 * V1 encodes the error codes in the 32bit top
380			 * nibble of the gfn (with its known
381			 * limitations vis-a-vis 64 bit callers).
382			 */
383			gfn |= (err == -ENOENT) ?
384				PRIVCMD_MMAPBATCH_PAGED_ERROR :
385				PRIVCMD_MMAPBATCH_MFN_ERROR;
386			return __put_user(gfn, st->user_gfn++);
387		} else
388			st->user_gfn++;
389	} else { /* st->version == 2 */
390		if (err)
391			return __put_user(err, st->user_err++);
392		else
393			st->user_err++;
394	}
395
396	return 0;
397}
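
For completeness, the matching decode on the userspace side of the V1 interface. PRIVCMD_MMAPBATCH_MFN_ERROR and PRIVCMD_MMAPBATCH_PAGED_ERROR come from the uapi header, and the test must check the full top-nibble mask first because the paged-out bit is a subset of it; the comment's caveat about 64-bit gfns applies here too. A sketch:

#include <xen/privcmd.h>	/* PRIVCMD_MMAPBATCH_*_ERROR */

/* Classify one gfn written back by IOCTL_PRIVCMD_MMAPBATCH (V1). */
static const char *v1_frame_status(unsigned long gfn)
{
	if ((gfn & PRIVCMD_MMAPBATCH_MFN_ERROR) == PRIVCMD_MMAPBATCH_MFN_ERROR)
		return "mapping failed";
	if ((gfn & PRIVCMD_MMAPBATCH_MFN_ERROR) == PRIVCMD_MMAPBATCH_PAGED_ERROR)
		return "paged out, retry";
	return "mapped";
}
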
398
399static int mmap_return_errors(void *data, int nr, void *state)
400{
401	struct mmap_batch_state *st = state;
402	int *errs = data;
403	int i;
404	int ret;
405
406	for (i = 0; i < nr; i++) {
407		ret = mmap_return_error(errs[i], st);
408		if (ret < 0)
409			return ret;
410	}
411	return 0;
412}
413
414/* Allocate pfns that are then mapped with gfns from foreign domid. Update
415 * the vma with the page info to use later.
416 * Returns: 0 if success, otherwise -errno
417 */
418static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
419{
420	int rc;
421	struct page **pages;
422
423	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
424	if (pages == NULL)
425		return -ENOMEM;
426
427	rc = xen_alloc_unpopulated_pages(numpgs, pages);
428	if (rc != 0) {
429		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
430			numpgs, rc);
431		kfree(pages);
432		return -ENOMEM;
433	}
434	BUG_ON(vma->vm_private_data != NULL);
435	vma->vm_private_data = pages;
436
437	return 0;
438}
439
440static const struct vm_operations_struct privcmd_vm_ops;
441
442static long privcmd_ioctl_mmap_batch(
443	struct file *file, void __user *udata, int version)
444{
445	struct privcmd_data *data = file->private_data;
446	int ret;
447	struct privcmd_mmapbatch_v2 m;
448	struct mm_struct *mm = current->mm;
449	struct vm_area_struct *vma;
450	unsigned long nr_pages;
451	LIST_HEAD(pagelist);
452	struct mmap_batch_state state;
453
454	switch (version) {
455	case 1:
456		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
457			return -EFAULT;
458		/* Returns per-frame error in m.arr. */
459		m.err = NULL;
460		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
461			return -EFAULT;
462		break;
463	case 2:
464		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
465			return -EFAULT;
466		/* Returns per-frame error code in m.err. */
467		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
468			return -EFAULT;
469		break;
470	default:
471		return -EINVAL;
472	}
473
474	/* If restriction is in place, check the domid matches */
475	if (data->domid != DOMID_INVALID && data->domid != m.dom)
476		return -EPERM;
477
478	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
479	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
480		return -EINVAL;
481
482	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
483
484	if (ret)
485		goto out;
486	if (list_empty(&pagelist)) {
487		ret = -EINVAL;
488		goto out;
489	}
490
491	if (version == 2) {
492		/* Zero error array now to only copy back actual errors. */
493		if (clear_user(m.err, sizeof(int) * m.num)) {
494			ret = -EFAULT;
495			goto out;
496		}
497	}
498
499	mmap_write_lock(mm);
500
501	vma = find_vma(mm, m.addr);
502	if (!vma ||
503	    vma->vm_ops != &privcmd_vm_ops) {
504		ret = -EINVAL;
505		goto out_unlock;
506	}
507
508	/*
509	 * Caller must either:
510	 *
511	 * Map the whole VMA range, which will also allocate all the
512	 * pages required for the auto_translated_physmap case.
513	 *
514	 * Or
515	 *
516	 * Map unmapped holes left from a previous map attempt (e.g.,
517	 * because those foreign frames were previously paged out).
518	 */
519	if (vma->vm_private_data == NULL) {
520		if (m.addr != vma->vm_start ||
521		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
522			ret = -EINVAL;
523			goto out_unlock;
524		}
525		if (xen_feature(XENFEAT_auto_translated_physmap)) {
526			ret = alloc_empty_pages(vma, nr_pages);
527			if (ret < 0)
528				goto out_unlock;
529		} else
530			vma->vm_private_data = PRIV_VMA_LOCKED;
531	} else {
532		if (m.addr < vma->vm_start ||
533		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
534			ret = -EINVAL;
535			goto out_unlock;
536		}
537		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
538			ret = -EINVAL;
539			goto out_unlock;
540		}
541	}
542
543	state.domain        = m.dom;
544	state.vma           = vma;
545	state.va            = m.addr;
546	state.index         = 0;
547	state.global_error  = 0;
548	state.version       = version;
549
550	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
551	/* mmap_batch_fn guarantees ret == 0 */
552	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
553				    &pagelist, mmap_batch_fn, &state));
554
555	mmap_write_unlock(mm);
556
557	if (state.global_error) {
558		/* Write back errors in second pass. */
559		state.user_gfn = (xen_pfn_t *)m.arr;
560		state.user_err = m.err;
561		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
562					   &pagelist, mmap_return_errors, &state);
563	} else
564		ret = 0;
565
566	/* If we have not had any EFAULT-like global errors then set the global
567	 * error to -ENOENT if necessary. */
568	if ((ret == 0) && (state.global_error == -ENOENT))
569		ret = -ENOENT;
570
571out:
572	free_page_list(&pagelist);
573	return ret;
574
575out_unlock:
576	mmap_write_unlock(mm);
577	goto out;
578}
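
Seen from userspace, the V2 flow is: reserve a window by mmap()ing the privcmd fd, then ask for the foreign frames to be mapped into it, retrying while frames are paged out. A hedged sketch assuming the uapi privcmd_mmapbatch_v2 layout and 4 KiB pages; map_foreign_frames() and its parameters are illustrative names, and real code would bound the retry loop:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>

static void *map_foreign_frames(int fd, uint16_t dom, uint64_t *gfns,
				int *err, unsigned int num)
{
	struct privcmd_mmapbatch_v2 m;
	size_t len = (size_t)num * 4096;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);

	if (addr == MAP_FAILED)
		return NULL;

	memset(&m, 0, sizeof(m));
	m.num  = num;
	m.dom  = dom;
	m.addr = (uintptr_t)addr;
	m.arr  = (void *)gfns;	/* assumes xen_pfn_t is 64-bit here */
	m.err  = err;		/* one errno-style int per frame */

	/* ENOENT: at least one frame was paged out; retry just the holes. */
	while (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m) < 0) {
		if (errno != ENOENT) {
			munmap(addr, len);
			return NULL;
		}
	}
	return addr;
}
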
579
580static int lock_pages(
581	struct privcmd_dm_op_buf kbufs[], unsigned int num,
582	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
583{
584	unsigned int i;
585
586	for (i = 0; i < num; i++) {
587		unsigned int requested;
588		int page_count;
589
590		requested = DIV_ROUND_UP(
591			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
592			PAGE_SIZE);
593		if (requested > nr_pages)
594			return -ENOSPC;
595
596		page_count = pin_user_pages_fast(
597			(unsigned long) kbufs[i].uptr,
598			requested, FOLL_WRITE, pages);
599		if (page_count < 0)
600			return page_count;
601
602		*pinned += page_count;
603		nr_pages -= page_count;
604		pages += page_count;
605	}
606
607	return 0;
608}
609
610static void unlock_pages(struct page *pages[], unsigned int nr_pages)
611{
612	unpin_user_pages_dirty_lock(pages, nr_pages, true);
613}
614
615static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
616{
617	struct privcmd_data *data = file->private_data;
618	struct privcmd_dm_op kdata;
619	struct privcmd_dm_op_buf *kbufs;
620	unsigned int nr_pages = 0;
621	struct page **pages = NULL;
622	struct xen_dm_op_buf *xbufs = NULL;
623	unsigned int i;
624	long rc;
625	unsigned int pinned = 0;
626
627	if (copy_from_user(&kdata, udata, sizeof(kdata)))
628		return -EFAULT;
629
630	/* If restriction is in place, check the domid matches */
631	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
632		return -EPERM;
633
634	if (kdata.num == 0)
635		return 0;
636
637	if (kdata.num > privcmd_dm_op_max_num)
638		return -E2BIG;
639
640	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
641	if (!kbufs)
642		return -ENOMEM;
643
644	if (copy_from_user(kbufs, kdata.ubufs,
645			   sizeof(*kbufs) * kdata.num)) {
646		rc = -EFAULT;
647		goto out;
648	}
649
650	for (i = 0; i < kdata.num; i++) {
651		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
652			rc = -E2BIG;
653			goto out;
654		}
655
656		if (!access_ok(kbufs[i].uptr,
657			       kbufs[i].size)) {
658			rc = -EFAULT;
659			goto out;
660		}
661
662		nr_pages += DIV_ROUND_UP(
663			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
664			PAGE_SIZE);
665	}
666
667	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
668	if (!pages) {
669		rc = -ENOMEM;
670		goto out;
671	}
672
673	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
674	if (!xbufs) {
675		rc = -ENOMEM;
676		goto out;
677	}
678
679	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
680	if (rc < 0) {
681		nr_pages = pinned;
682		goto out;
683	}
684
685	for (i = 0; i < kdata.num; i++) {
686		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
687		xbufs[i].size = kbufs[i].size;
688	}
689
690	xen_preemptible_hcall_begin();
691	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
692	xen_preemptible_hcall_end();
693
694out:
695	unlock_pages(pages, nr_pages);
696	kfree(xbufs);
697	kfree(pages);
698	kfree(kbufs);
699
700	return rc;
701}
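
From the device model's side, each dm_op is an opaque buffer (plus optional auxiliary buffers) handed to Xen; this driver only pins the memory and forwards it. A minimal sketch, assuming the uapi privcmd_dm_op/privcmd_dm_op_buf layouts; do_one_dm_op() is an illustrative name and the buffer holds whatever struct xen_dm_op the caller built:

#include <stdint.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int do_one_dm_op(int fd, uint16_t dom, void *buf, uint64_t size)
{
	struct privcmd_dm_op_buf ubuf = { .uptr = buf, .size = size };
	struct privcmd_dm_op op = { .dom = dom, .num = 1, .ubufs = &ubuf };

	/* Fails with E2BIG if size exceeds dm_op_buf_max_size (default 4096). */
	return ioctl(fd, IOCTL_PRIVCMD_DM_OP, &op);
}
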
702
703static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
704{
705	struct privcmd_data *data = file->private_data;
706	domid_t dom;
707
708	if (copy_from_user(&dom, udata, sizeof(dom)))
709		return -EFAULT;
710
711	/* Set restriction to the specified domain, or check it matches */
712	if (data->domid == DOMID_INVALID)
713		data->domid = dom;
714	else if (data->domid != dom)
715		return -EINVAL;
716
717	return 0;
718}
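
Restriction is one-way: once set, the fd can only target that domain, and raw hypercalls (see privcmd_ioctl_hypercall above) fail with EPERM. Device models typically restrict their fd right after opening it, before dropping other privileges. A sketch, assuming domid_t is the 16-bit id of the Xen headers:

#include <stdint.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int restrict_fd_to_domain(int fd, uint16_t domid)
{
	/* The ioctl copies sizeof(domid_t) == 2 bytes from this pointer. */
	return ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
}
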
719
720static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
721{
722	struct privcmd_data *data = file->private_data;
723	struct mm_struct *mm = current->mm;
724	struct vm_area_struct *vma;
725	struct privcmd_mmap_resource kdata;
726	xen_pfn_t *pfns = NULL;
727	struct xen_mem_acquire_resource xdata;
728	int rc;
729
730	if (copy_from_user(&kdata, udata, sizeof(kdata)))
731		return -EFAULT;
732
733	/* If restriction is in place, check the domid matches */
734	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
735		return -EPERM;
736
737	mmap_write_lock(mm);
738
739	vma = find_vma(mm, kdata.addr);
740	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
741		rc = -EINVAL;
742		goto out;
743	}
744
745	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
746	if (!pfns) {
747		rc = -ENOMEM;
748		goto out;
749	}
750
751	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
752	    xen_feature(XENFEAT_auto_translated_physmap)) {
753		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
754		struct page **pages;
755		unsigned int i;
756
757		rc = alloc_empty_pages(vma, nr);
758		if (rc < 0)
759			goto out;
760
761		pages = vma->vm_private_data;
762		for (i = 0; i < kdata.num; i++) {
763			xen_pfn_t pfn =
764				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
765
766			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
767		}
768	} else
769		vma->vm_private_data = PRIV_VMA_LOCKED;
770
771	memset(&xdata, 0, sizeof(xdata));
772	xdata.domid = kdata.dom;
773	xdata.type = kdata.type;
774	xdata.id = kdata.id;
775	xdata.frame = kdata.idx;
776	xdata.nr_frames = kdata.num;
777	set_xen_guest_handle(xdata.frame_list, pfns);
778
779	xen_preemptible_hcall_begin();
780	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
781	xen_preemptible_hcall_end();
782
783	if (rc)
784		goto out;
785
786	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
787	    xen_feature(XENFEAT_auto_translated_physmap)) {
788		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
789	} else {
790		unsigned int domid =
791			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
792			DOMID_SELF : kdata.dom;
793		int num;
794
795		num = xen_remap_domain_mfn_array(vma,
796						 kdata.addr & PAGE_MASK,
797						 pfns, kdata.num, (int *)pfns,
798						 vma->vm_page_prot,
799						 domid,
800						 vma->vm_private_data);
801		if (num < 0)
802			rc = num;
803		else if (num != kdata.num) {
804			unsigned int i;
805
806			for (i = 0; i < num; i++) {
807				rc = pfns[i];
808				if (rc < 0)
809					break;
810			}
811		} else
812			rc = 0;
813	}
814
815out:
816	mmap_write_unlock(mm);
817	kfree(pfns);
818
819	return rc;
820}
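
The resource-mapping flow mirrors MMAPBATCH: mmap() a window on the fd first, then ask for frames of a domain resource (e.g. its grant table or ioreq server pages) to be placed there. A hedged sketch with illustrative names, assuming the uapi privcmd_mmap_resource layout and 4 KiB pages:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>

static void *map_domain_resource(int fd, uint16_t dom, uint32_t type,
				 uint32_t id, uint64_t num_frames)
{
	size_t len = (size_t)num_frames * 4096;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	struct privcmd_mmap_resource r = {
		.dom = dom, .type = type, .id = id,
		.idx = 0, .num = num_frames, .addr = (uintptr_t)addr,
	};

	if (addr == MAP_FAILED)
		return NULL;
	if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r) < 0) {
		munmap(addr, len);
		return NULL;
	}
	return addr;
}
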
821
822static long privcmd_ioctl(struct file *file,
823			  unsigned int cmd, unsigned long data)
824{
825	int ret = -ENOTTY;
826	void __user *udata = (void __user *) data;
827
828	switch (cmd) {
829	case IOCTL_PRIVCMD_HYPERCALL:
830		ret = privcmd_ioctl_hypercall(file, udata);
831		break;
832
833	case IOCTL_PRIVCMD_MMAP:
834		ret = privcmd_ioctl_mmap(file, udata);
835		break;
836
837	case IOCTL_PRIVCMD_MMAPBATCH:
838		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
839		break;
840
841	case IOCTL_PRIVCMD_MMAPBATCH_V2:
842		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
843		break;
844
845	case IOCTL_PRIVCMD_DM_OP:
846		ret = privcmd_ioctl_dm_op(file, udata);
847		break;
848
849	case IOCTL_PRIVCMD_RESTRICT:
850		ret = privcmd_ioctl_restrict(file, udata);
851		break;
852
853	case IOCTL_PRIVCMD_MMAP_RESOURCE:
854		ret = privcmd_ioctl_mmap_resource(file, udata);
855		break;
856
857	default:
858		break;
859	}
860
861	return ret;
862}
863
864static int privcmd_open(struct inode *ino, struct file *file)
865{
866	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
867
868	if (!data)
869		return -ENOMEM;
870
871	/* DOMID_INVALID implies no restriction */
872	data->domid = DOMID_INVALID;
873
874	file->private_data = data;
875	return 0;
876}
877
878static int privcmd_release(struct inode *ino, struct file *file)
879{
880	struct privcmd_data *data = file->private_data;
881
882	kfree(data);
883	return 0;
884}
885
886static void privcmd_close(struct vm_area_struct *vma)
887{
888	struct page **pages = vma->vm_private_data;
889	int numpgs = vma_pages(vma);
890	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
891	int rc;
892
893	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
894		return;
895
896	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
897	if (rc == 0)
898		xen_free_unpopulated_pages(numpgs, pages);
899	else
900		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
901			numpgs, rc);
902	kfree(pages);
903}
904
905static vm_fault_t privcmd_fault(struct vm_fault *vmf)
906{
907	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
908	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
909	       vmf->pgoff, (void *)vmf->address);
910
911	return VM_FAULT_SIGBUS;
912}
913
914static const struct vm_operations_struct privcmd_vm_ops = {
915	.close = privcmd_close,
916	.fault = privcmd_fault
917};
918
919static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
920{
921	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
922	 * how to recreate these mappings */
923	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
924			 VM_DONTEXPAND | VM_DONTDUMP;
925	vma->vm_ops = &privcmd_vm_ops;
926	vma->vm_private_data = NULL;
927
928	return 0;
929}
930
931/*
932 * For MMAPBATCH*. This allows asserting the singleshot mapping
933 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
934 * can then be retried until success.
935 */
936static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
937{
938	return pte_none(*pte) ? 0 : -EBUSY;
939}
940
941static int privcmd_vma_range_is_mapped(
942	           struct vm_area_struct *vma,
943	           unsigned long addr,
944	           unsigned long nr_pages)
945{
946	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
947				   is_mapped_fn, NULL) != 0;
948}
949
950const struct file_operations xen_privcmd_fops = {
951	.owner = THIS_MODULE,
952	.unlocked_ioctl = privcmd_ioctl,
953	.open = privcmd_open,
954	.release = privcmd_release,
955	.mmap = privcmd_mmap,
956};
957EXPORT_SYMBOL_GPL(xen_privcmd_fops);
958
959static struct miscdevice privcmd_dev = {
960	.minor = MISC_DYNAMIC_MINOR,
961	.name = "xen/privcmd",
962	.fops = &xen_privcmd_fops,
963};
964
965static int __init privcmd_init(void)
966{
967	int err;
968
969	if (!xen_domain())
970		return -ENODEV;
971
972	err = misc_register(&privcmd_dev);
973	if (err != 0) {
974		pr_err("Could not register Xen privcmd device\n");
975		return err;
976	}
977
978	err = misc_register(&xen_privcmdbuf_dev);
979	if (err != 0) {
980		pr_err("Could not register Xen hypercall-buf device\n");
981		misc_deregister(&privcmd_dev);
982		return err;
983	}
984
985	return 0;
986}
987
988static void __exit privcmd_exit(void)
989{
990	misc_deregister(&privcmd_dev);
991	misc_deregister(&xen_privcmdbuf_dev);
992}
993
994module_init(privcmd_init);
995module_exit(privcmd_exit);
v4.6
 
  1/******************************************************************************
  2 * privcmd.c
  3 *
  4 * Interface to privileged domain-0 commands.
  5 *
  6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
  7 */
  8
  9#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 10
 11#include <linux/kernel.h>
 12#include <linux/module.h>
 13#include <linux/sched.h>
 14#include <linux/slab.h>
 15#include <linux/string.h>
 16#include <linux/errno.h>
 17#include <linux/mm.h>
 18#include <linux/mman.h>
 19#include <linux/uaccess.h>
 20#include <linux/swap.h>
 21#include <linux/highmem.h>
 22#include <linux/pagemap.h>
 23#include <linux/seq_file.h>
 24#include <linux/miscdevice.h>
 25
 26#include <asm/pgalloc.h>
 27#include <asm/pgtable.h>
 28#include <asm/tlb.h>
 29#include <asm/xen/hypervisor.h>
 30#include <asm/xen/hypercall.h>
 31
 32#include <xen/xen.h>
 33#include <xen/privcmd.h>
 34#include <xen/interface/xen.h>
 35#include <xen/features.h>
 36#include <xen/page.h>
 37#include <xen/xen-ops.h>
 38#include <xen/balloon.h>
 39
 40#include "privcmd.h"
 41
 42MODULE_LICENSE("GPL");
 43
 44#define PRIV_VMA_LOCKED ((void *)1)
 45
 46static int privcmd_vma_range_is_mapped(
 47               struct vm_area_struct *vma,
 48               unsigned long addr,
 49               unsigned long nr_pages);
 50
 51static long privcmd_ioctl_hypercall(void __user *udata)
 52{
 53	struct privcmd_hypercall hypercall;
 54	long ret;
 55
 56	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
 57		return -EFAULT;
 58
 59	xen_preemptible_hcall_begin();
 60	ret = privcmd_call(hypercall.op,
 61			   hypercall.arg[0], hypercall.arg[1],
 62			   hypercall.arg[2], hypercall.arg[3],
 63			   hypercall.arg[4]);
 64	xen_preemptible_hcall_end();
 65
 66	return ret;
 67}
 68
 69static void free_page_list(struct list_head *pages)
 70{
 71	struct page *p, *n;
 72
 73	list_for_each_entry_safe(p, n, pages, lru)
 74		__free_page(p);
 75
 76	INIT_LIST_HEAD(pages);
 77}
 78
 79/*
 80 * Given an array of items in userspace, return a list of pages
 81 * containing the data.  If copying fails, either because of memory
 82 * allocation failure or a problem reading user memory, return an
 83 * error code; it's up to the caller to dispose of any partial list.
 84 */
 85static int gather_array(struct list_head *pagelist,
 86			unsigned nelem, size_t size,
 87			const void __user *data)
 88{
 89	unsigned pageidx;
 90	void *pagedata;
 91	int ret;
 92
 93	if (size > PAGE_SIZE)
 94		return 0;
 95
 96	pageidx = PAGE_SIZE;
 97	pagedata = NULL;	/* quiet, gcc */
 98	while (nelem--) {
 99		if (pageidx > PAGE_SIZE-size) {
100			struct page *page = alloc_page(GFP_KERNEL);
101
102			ret = -ENOMEM;
103			if (page == NULL)
104				goto fail;
105
106			pagedata = page_address(page);
107
108			list_add_tail(&page->lru, pagelist);
109			pageidx = 0;
110		}
111
112		ret = -EFAULT;
113		if (copy_from_user(pagedata + pageidx, data, size))
114			goto fail;
115
116		data += size;
117		pageidx += size;
118	}
119
120	ret = 0;
121
122fail:
123	return ret;
124}
125
126/*
127 * Call function "fn" on each element of the array fragmented
128 * over a list of pages.
129 */
130static int traverse_pages(unsigned nelem, size_t size,
131			  struct list_head *pos,
132			  int (*fn)(void *data, void *state),
133			  void *state)
134{
135	void *pagedata;
136	unsigned pageidx;
137	int ret = 0;
138
139	BUG_ON(size > PAGE_SIZE);
140
141	pageidx = PAGE_SIZE;
142	pagedata = NULL;	/* hush, gcc */
143
144	while (nelem--) {
145		if (pageidx > PAGE_SIZE-size) {
146			struct page *page;
147			pos = pos->next;
148			page = list_entry(pos, struct page, lru);
149			pagedata = page_address(page);
150			pageidx = 0;
151		}
152
153		ret = (*fn)(pagedata + pageidx, state);
154		if (ret)
155			break;
156		pageidx += size;
157	}
158
159	return ret;
160}
161
162/*
163 * Similar to traverse_pages, but use each page as a "block" of
164 * data to be processed as one unit.
165 */
166static int traverse_pages_block(unsigned nelem, size_t size,
167				struct list_head *pos,
168				int (*fn)(void *data, int nr, void *state),
169				void *state)
170{
171	void *pagedata;
172	unsigned pageidx;
173	int ret = 0;
174
175	BUG_ON(size > PAGE_SIZE);
176
177	pageidx = PAGE_SIZE;
178
179	while (nelem) {
180		int nr = (PAGE_SIZE/size);
181		struct page *page;
182		if (nr > nelem)
183			nr = nelem;
184		pos = pos->next;
185		page = list_entry(pos, struct page, lru);
186		pagedata = page_address(page);
187		ret = (*fn)(pagedata, nr, state);
188		if (ret)
189			break;
190		nelem -= nr;
191	}
192
193	return ret;
194}
195
196struct mmap_gfn_state {
197	unsigned long va;
198	struct vm_area_struct *vma;
199	domid_t domain;
200};
201
202static int mmap_gfn_range(void *data, void *state)
203{
204	struct privcmd_mmap_entry *msg = data;
205	struct mmap_gfn_state *st = state;
206	struct vm_area_struct *vma = st->vma;
207	int rc;
208
209	/* Do not allow range to wrap the address space. */
210	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
211	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
212		return -EINVAL;
213
214	/* Range chunks must be contiguous in va space. */
215	if ((msg->va != st->va) ||
216	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
217		return -EINVAL;
218
219	rc = xen_remap_domain_gfn_range(vma,
220					msg->va & PAGE_MASK,
221					msg->mfn, msg->npages,
222					vma->vm_page_prot,
223					st->domain, NULL);
224	if (rc < 0)
225		return rc;
226
227	st->va += msg->npages << PAGE_SHIFT;
228
229	return 0;
230}
231
232static long privcmd_ioctl_mmap(void __user *udata)
233{
234	struct privcmd_mmap mmapcmd;
235	struct mm_struct *mm = current->mm;
236	struct vm_area_struct *vma;
237	int rc;
238	LIST_HEAD(pagelist);
239	struct mmap_gfn_state state;
240
241	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
242	if (xen_feature(XENFEAT_auto_translated_physmap))
243		return -ENOSYS;
244
245	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
246		return -EFAULT;
247
248	rc = gather_array(&pagelist,
249			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
250			  mmapcmd.entry);
251
252	if (rc || list_empty(&pagelist))
253		goto out;
254
255	down_write(&mm->mmap_sem);
256
257	{
258		struct page *page = list_first_entry(&pagelist,
259						     struct page, lru);
260		struct privcmd_mmap_entry *msg = page_address(page);
261
262		vma = find_vma(mm, msg->va);
263		rc = -EINVAL;
264
265		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
266			goto out_up;
267		vma->vm_private_data = PRIV_VMA_LOCKED;
268	}
269
270	state.va = vma->vm_start;
271	state.vma = vma;
272	state.domain = mmapcmd.dom;
273
274	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
275			    &pagelist,
276			    mmap_gfn_range, &state);
277
278
279out_up:
280	up_write(&mm->mmap_sem);
281
282out:
283	free_page_list(&pagelist);
284
285	return rc;
286}
287
288struct mmap_batch_state {
289	domid_t domain;
290	unsigned long va;
291	struct vm_area_struct *vma;
292	int index;
293	/* A tristate:
294	 *      0 for no errors
295	 *      1 if at least one error has happened (and no
296	 *          -ENOENT errors have happened)
297	 *      -ENOENT if at least 1 -ENOENT has happened.
298	 */
299	int global_error;
300	int version;
301
302	/* User-space gfn array to store errors in the second pass for V1. */
303	xen_pfn_t __user *user_gfn;
304	/* User-space int array to store errors in the second pass for V2. */
305	int __user *user_err;
306};
307
308/* auto translated dom0 note: if domU being created is PV, then gfn is
309 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
310 */
311static int mmap_batch_fn(void *data, int nr, void *state)
312{
313	xen_pfn_t *gfnp = data;
314	struct mmap_batch_state *st = state;
315	struct vm_area_struct *vma = st->vma;
316	struct page **pages = vma->vm_private_data;
317	struct page **cur_pages = NULL;
318	int ret;
319
320	if (xen_feature(XENFEAT_auto_translated_physmap))
321		cur_pages = &pages[st->index];
322
323	BUG_ON(nr < 0);
324	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
325					 (int *)gfnp, st->vma->vm_page_prot,
326					 st->domain, cur_pages);
327
328	/* Adjust the global_error? */
329	if (ret != nr) {
330		if (ret == -ENOENT)
331			st->global_error = -ENOENT;
332		else {
333			/* Record that at least one error has happened. */
334			if (st->global_error == 0)
335				st->global_error = 1;
336		}
337	}
338	st->va += PAGE_SIZE * nr;
339	st->index += nr;
340
341	return 0;
342}
343
344static int mmap_return_error(int err, struct mmap_batch_state *st)
345{
346	int ret;
347
348	if (st->version == 1) {
349		if (err) {
350			xen_pfn_t gfn;
351
352			ret = get_user(gfn, st->user_gfn);
353			if (ret < 0)
354				return ret;
355			/*
356			 * V1 encodes the error codes in the 32bit top
357			 * nibble of the gfn (with its known
358			 * limitations vis-a-vis 64 bit callers).
359			 */
360			gfn |= (err == -ENOENT) ?
361				PRIVCMD_MMAPBATCH_PAGED_ERROR :
362				PRIVCMD_MMAPBATCH_MFN_ERROR;
363			return __put_user(gfn, st->user_gfn++);
364		} else
365			st->user_gfn++;
366	} else { /* st->version == 2 */
367		if (err)
368			return __put_user(err, st->user_err++);
369		else
370			st->user_err++;
371	}
372
373	return 0;
374}
375
376static int mmap_return_errors(void *data, int nr, void *state)
377{
378	struct mmap_batch_state *st = state;
379	int *errs = data;
380	int i;
381	int ret;
382
383	for (i = 0; i < nr; i++) {
384		ret = mmap_return_error(errs[i], st);
385		if (ret < 0)
386			return ret;
387	}
388	return 0;
389}
390
391/* Allocate pfns that are then mapped with gfns from foreign domid. Update
392 * the vma with the page info to use later.
393 * Returns: 0 if success, otherwise -errno
394 */
395static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
396{
397	int rc;
398	struct page **pages;
399
400	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
401	if (pages == NULL)
402		return -ENOMEM;
403
404	rc = alloc_xenballooned_pages(numpgs, pages);
405	if (rc != 0) {
406		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
407			numpgs, rc);
408		kfree(pages);
409		return -ENOMEM;
410	}
411	BUG_ON(vma->vm_private_data != NULL);
412	vma->vm_private_data = pages;
413
414	return 0;
415}
416
417static const struct vm_operations_struct privcmd_vm_ops;
418
419static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
420{
421	int ret;
422	struct privcmd_mmapbatch_v2 m;
423	struct mm_struct *mm = current->mm;
424	struct vm_area_struct *vma;
425	unsigned long nr_pages;
426	LIST_HEAD(pagelist);
427	struct mmap_batch_state state;
428
429	switch (version) {
430	case 1:
431		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
432			return -EFAULT;
433		/* Returns per-frame error in m.arr. */
434		m.err = NULL;
435		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
436			return -EFAULT;
437		break;
438	case 2:
439		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
440			return -EFAULT;
441		/* Returns per-frame error code in m.err. */
442		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
443			return -EFAULT;
444		break;
445	default:
446		return -EINVAL;
447	}
448
449	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
450	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
451		return -EINVAL;
452
453	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
454
455	if (ret)
456		goto out;
457	if (list_empty(&pagelist)) {
458		ret = -EINVAL;
459		goto out;
460	}
461
462	if (version == 2) {
463		/* Zero error array now to only copy back actual errors. */
464		if (clear_user(m.err, sizeof(int) * m.num)) {
465			ret = -EFAULT;
466			goto out;
467		}
468	}
469
470	down_write(&mm->mmap_sem);
471
472	vma = find_vma(mm, m.addr);
473	if (!vma ||
474	    vma->vm_ops != &privcmd_vm_ops) {
475		ret = -EINVAL;
476		goto out_unlock;
477	}
478
479	/*
480	 * Caller must either:
481	 *
482	 * Map the whole VMA range, which will also allocate all the
483	 * pages required for the auto_translated_physmap case.
484	 *
485	 * Or
486	 *
487	 * Map unmapped holes left from a previous map attempt (e.g.,
488	 * because those foreign frames were previously paged out).
489	 */
490	if (vma->vm_private_data == NULL) {
491		if (m.addr != vma->vm_start ||
492		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
493			ret = -EINVAL;
494			goto out_unlock;
495		}
496		if (xen_feature(XENFEAT_auto_translated_physmap)) {
497			ret = alloc_empty_pages(vma, nr_pages);
498			if (ret < 0)
499				goto out_unlock;
500		} else
501			vma->vm_private_data = PRIV_VMA_LOCKED;
502	} else {
503		if (m.addr < vma->vm_start ||
504		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
505			ret = -EINVAL;
506			goto out_unlock;
507		}
508		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
509			ret = -EINVAL;
510			goto out_unlock;
511		}
512	}
513
514	state.domain        = m.dom;
515	state.vma           = vma;
516	state.va            = m.addr;
517	state.index         = 0;
518	state.global_error  = 0;
519	state.version       = version;
520
521	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
522	/* mmap_batch_fn guarantees ret == 0 */
523	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
524				    &pagelist, mmap_batch_fn, &state));
525
526	up_write(&mm->mmap_sem);
527
528	if (state.global_error) {
529		/* Write back errors in second pass. */
530		state.user_gfn = (xen_pfn_t *)m.arr;
531		state.user_err = m.err;
532		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
533					   &pagelist, mmap_return_errors, &state);
534	} else
535		ret = 0;
536
537	/* If we have not had any EFAULT-like global errors then set the global
538	 * error to -ENOENT if necessary. */
539	if ((ret == 0) && (state.global_error == -ENOENT))
540		ret = -ENOENT;
541
542out:
543	free_page_list(&pagelist);
544	return ret;
545
546out_unlock:
547	up_write(&mm->mmap_sem);
548	goto out;
549}
550
551static long privcmd_ioctl(struct file *file,
552			  unsigned int cmd, unsigned long data)
553{
554	int ret = -ENOSYS;
555	void __user *udata = (void __user *) data;
556
557	switch (cmd) {
558	case IOCTL_PRIVCMD_HYPERCALL:
559		ret = privcmd_ioctl_hypercall(udata);
560		break;
561
562	case IOCTL_PRIVCMD_MMAP:
563		ret = privcmd_ioctl_mmap(udata);
564		break;
565
566	case IOCTL_PRIVCMD_MMAPBATCH:
567		ret = privcmd_ioctl_mmap_batch(udata, 1);
568		break;
569
570	case IOCTL_PRIVCMD_MMAPBATCH_V2:
571		ret = privcmd_ioctl_mmap_batch(udata, 2);
572		break;
573
574	default:
575		ret = -EINVAL;
576		break;
577	}
578
579	return ret;
580}
581
582static void privcmd_close(struct vm_area_struct *vma)
583{
584	struct page **pages = vma->vm_private_data;
585	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
586	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
587	int rc;
588
589	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
590		return;
591
592	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
593	if (rc == 0)
594		free_xenballooned_pages(numpgs, pages);
595	else
596		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
597			numpgs, rc);
598	kfree(pages);
599}
600
601static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
602{
603	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
604	       vma, vma->vm_start, vma->vm_end,
605	       vmf->pgoff, vmf->virtual_address);
606
607	return VM_FAULT_SIGBUS;
608}
609
610static const struct vm_operations_struct privcmd_vm_ops = {
611	.close = privcmd_close,
612	.fault = privcmd_fault
613};
614
615static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
616{
617	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
618	 * how to recreate these mappings */
619	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
620			 VM_DONTEXPAND | VM_DONTDUMP;
621	vma->vm_ops = &privcmd_vm_ops;
622	vma->vm_private_data = NULL;
623
624	return 0;
625}
626
627/*
628 * For MMAPBATCH*. This allows asserting the singleshot mapping
629 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 630 * can then be retried until success.
631 */
632static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
633	                unsigned long addr, void *data)
634{
635	return pte_none(*pte) ? 0 : -EBUSY;
636}
637
638static int privcmd_vma_range_is_mapped(
639	           struct vm_area_struct *vma,
640	           unsigned long addr,
641	           unsigned long nr_pages)
642{
643	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
644				   is_mapped_fn, NULL) != 0;
645}
646
647const struct file_operations xen_privcmd_fops = {
648	.owner = THIS_MODULE,
649	.unlocked_ioctl = privcmd_ioctl,
650	.mmap = privcmd_mmap,
651};
652EXPORT_SYMBOL_GPL(xen_privcmd_fops);
653
654static struct miscdevice privcmd_dev = {
655	.minor = MISC_DYNAMIC_MINOR,
656	.name = "xen/privcmd",
657	.fops = &xen_privcmd_fops,
658};
659
660static int __init privcmd_init(void)
661{
662	int err;
663
664	if (!xen_domain())
665		return -ENODEV;
666
667	err = misc_register(&privcmd_dev);
668	if (err != 0) {
669		pr_err("Could not register Xen privcmd device\n");
670		return err;
671	}
672	return 0;
673}
674
675static void __exit privcmd_exit(void)
676{
677	misc_deregister(&privcmd_dev);
678}
679
680module_init(privcmd_init);
681module_exit(privcmd_exit);