// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

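/*
 * IOCTL_PRIVCMD_HYPERCALL: forward an arbitrary hypercall from a privileged
 * userspace tool to the hypervisor.  Refused once the file handle has been
 * restricted to a single domain via IOCTL_PRIVCMD_RESTRICT.
 *
 * A minimal userspace sketch (illustrative only, not part of this driver);
 * it assumes the usual /dev/xen/privcmd node and the Xen-provided
 * __HYPERVISOR_xen_version/XENVER_version constants:
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call) < 0)
 *		perror("hypercall");
 */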
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data. If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

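/*
 * Per-call cursor for the legacy IOCTL_PRIVCMD_MMAP path: tracks the next
 * expected virtual address, the VMA being populated and the foreign domain
 * whose frames are being mapped (see mmap_gfn_range() below).
 */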
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

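/*
 * IOCTL_PRIVCMD_MMAP: legacy interface.  The caller passes an array of
 * privcmd_mmap_entry chunks that must cover one previously mmap()ed VMA
 * contiguously; each chunk is remapped onto the foreign domain's frames via
 * mmap_gfn_range().  Not available on an auto-translated dom0, where only
 * the MMAPBATCH variants below are supported.
 */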
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

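/*
 * IOCTL_PRIVCMD_MMAPBATCH (v1) and IOCTL_PRIVCMD_MMAPBATCH_V2: map an array
 * of foreign frames into a previously mmap()ed VMA.  V1 reports per-frame
 * errors by setting high bits in the caller's gfn array, V2 via a separate
 * error array.  A minimal V2 sketch from userspace (illustrative only;
 * "fd", "domid", "nr", "frames" and "errs" are the caller's responsibility):
 *
 *	size_t len = nr * getpagesize();
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 batch = {
 *		.num  = nr,
 *		.dom  = domid,
 *		.addr = (__u64)(uintptr_t)va,
 *		.arr  = frames,
 *		.err  = errs,
 *	};
 *
 *	if (va != MAP_FAILED)
 *		ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch);
 */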
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.version = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}

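/*
 * Pin the user buffers described by @kbufs so the hypervisor can safely
 * access them for the duration of the dm_op hypercall.  Every page actually
 * pinned is accounted in *@pinned, even on partial failure, so the caller
 * can hand exactly that count back to unlock_pages().
 */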
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (page_count < 0)
			return page_count;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

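/*
 * IOCTL_PRIVCMD_DM_OP: issue a device-model operation on behalf of an
 * emulator.  The caller supplies an array of privcmd_dm_op_buf descriptors
 * (bounded by the dm_op_max_nr_bufs and dm_op_buf_max_size module
 * parameters); the buffers are pinned, wrapped in xen_dm_op_buf handles and
 * passed to HYPERVISOR_dm_op() as a preemptible hypercall.
 */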
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0) {
		nr_pages = pinned;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

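/*
 * IOCTL_PRIVCMD_RESTRICT: lock this file handle to a single domain.  Once
 * set, raw hypercalls are refused and every other ioctl must target the same
 * domid; the restriction is sticky for the lifetime of the open file.  A
 * minimal sketch from userspace (illustrative only):
 *
 *	domid_t domid = 5;
 *
 *	if (ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid) < 0)
 *		perror("restrict");
 */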
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

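/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE: map a guest resource (for example ioreq
 * server pages or grant table frames) into a previously mmap()ed VMA by
 * acquiring the frames with XENMEM_acquire_resource and then remapping them
 * into the caller's address space.
 */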
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata;
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	memset(&xdata, 0, sizeof(xdata));
	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;
	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num;

		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, (int *)pfns,
						 vma->vm_page_prot,
						 domid,
						 vma->vm_private_data);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = pfns[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

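/* Each open of the device gets its own, initially unrestricted, state. */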
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

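/*
 * VMA teardown: on an auto-translated dom0 the pages backing the mapping
 * were taken from the unpopulated-memory pool, so unmap the foreign frames
 * and give the pages back; otherwise there is nothing to release here.
 */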
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

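/*
 * Mappings are always installed eagerly by the ioctls above, so a fault on a
 * privcmd VMA means userspace touched a hole that was never (or could not
 * be) mapped; report it as SIGBUS rather than trying to fault anything in.
 */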
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		misc_deregister(&privcmd_dev);
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);