1// SPDX-License-Identifier: GPL-2.0-only
2/******************************************************************************
3 * privcmd.c
4 *
5 * Interface to privileged domain-0 commands.
6 *
7 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
8 */
9
10#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
11
12#include <linux/eventfd.h>
13#include <linux/file.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/poll.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/string.h>
21#include <linux/workqueue.h>
22#include <linux/errno.h>
23#include <linux/mm.h>
24#include <linux/mman.h>
25#include <linux/uaccess.h>
26#include <linux/swap.h>
27#include <linux/highmem.h>
28#include <linux/pagemap.h>
29#include <linux/seq_file.h>
30#include <linux/miscdevice.h>
31#include <linux/moduleparam.h>
32#include <linux/virtio_mmio.h>
33
34#include <asm/xen/hypervisor.h>
35#include <asm/xen/hypercall.h>
36
37#include <xen/xen.h>
38#include <xen/events.h>
39#include <xen/privcmd.h>
40#include <xen/interface/xen.h>
41#include <xen/interface/memory.h>
42#include <xen/interface/hvm/dm_op.h>
43#include <xen/interface/hvm/ioreq.h>
44#include <xen/features.h>
45#include <xen/page.h>
46#include <xen/xen-ops.h>
47#include <xen/balloon.h>
48
49#include "privcmd.h"
50
51MODULE_LICENSE("GPL");
52
53#define PRIV_VMA_LOCKED ((void *)1)
54
55static unsigned int privcmd_dm_op_max_num = 16;
56module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
57MODULE_PARM_DESC(dm_op_max_nr_bufs,
58 "Maximum number of buffers per dm_op hypercall");
59
60static unsigned int privcmd_dm_op_buf_max_size = 4096;
61module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
62 0644);
63MODULE_PARM_DESC(dm_op_buf_max_size,
64 "Maximum size of a dm_op hypercall buffer");
65
66struct privcmd_data {
67 domid_t domid;
68};
69
70static int privcmd_vma_range_is_mapped(
71 struct vm_area_struct *vma,
72 unsigned long addr,
73 unsigned long nr_pages);
74
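/*
 * IOCTL_PRIVCMD_HYPERCALL: forward a raw hypercall from user space to the
 * hypervisor. Rejected with -EPERM once the file handle has been restricted
 * to a particular domain via IOCTL_PRIVCMD_RESTRICT.
 */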
75static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
76{
77 struct privcmd_data *data = file->private_data;
78 struct privcmd_hypercall hypercall;
79 long ret;
80
81 /* Disallow arbitrary hypercalls if restricted */
82 if (data->domid != DOMID_INVALID)
83 return -EPERM;
84
85 if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
86 return -EFAULT;
87
88 xen_preemptible_hcall_begin();
89 ret = privcmd_call(hypercall.op,
90 hypercall.arg[0], hypercall.arg[1],
91 hypercall.arg[2], hypercall.arg[3],
92 hypercall.arg[4]);
93 xen_preemptible_hcall_end();
94
95 return ret;
96}
97
98static void free_page_list(struct list_head *pages)
99{
100 struct page *p, *n;
101
102 list_for_each_entry_safe(p, n, pages, lru)
103 __free_page(p);
104
105 INIT_LIST_HEAD(pages);
106}
107
108/*
109 * Given an array of items in userspace, return a list of pages
110 * containing the data. If copying fails, either because of memory
111 * allocation failure or a problem reading user memory, return an
 112 * error code; it's up to the caller to dispose of any partial list.
113 */
114static int gather_array(struct list_head *pagelist,
115 unsigned nelem, size_t size,
116 const void __user *data)
117{
118 unsigned pageidx;
119 void *pagedata;
120 int ret;
121
122 if (size > PAGE_SIZE)
123 return 0;
124
125 pageidx = PAGE_SIZE;
126 pagedata = NULL; /* quiet, gcc */
127 while (nelem--) {
128 if (pageidx > PAGE_SIZE-size) {
129 struct page *page = alloc_page(GFP_KERNEL);
130
131 ret = -ENOMEM;
132 if (page == NULL)
133 goto fail;
134
135 pagedata = page_address(page);
136
137 list_add_tail(&page->lru, pagelist);
138 pageidx = 0;
139 }
140
141 ret = -EFAULT;
142 if (copy_from_user(pagedata + pageidx, data, size))
143 goto fail;
144
145 data += size;
146 pageidx += size;
147 }
148
149 ret = 0;
150
151fail:
152 return ret;
153}
154
155/*
156 * Call function "fn" on each element of the array fragmented
157 * over a list of pages.
158 */
159static int traverse_pages(unsigned nelem, size_t size,
160 struct list_head *pos,
161 int (*fn)(void *data, void *state),
162 void *state)
163{
164 void *pagedata;
165 unsigned pageidx;
166 int ret = 0;
167
168 BUG_ON(size > PAGE_SIZE);
169
170 pageidx = PAGE_SIZE;
171 pagedata = NULL; /* hush, gcc */
172
173 while (nelem--) {
174 if (pageidx > PAGE_SIZE-size) {
175 struct page *page;
176 pos = pos->next;
177 page = list_entry(pos, struct page, lru);
178 pagedata = page_address(page);
179 pageidx = 0;
180 }
181
182 ret = (*fn)(pagedata + pageidx, state);
183 if (ret)
184 break;
185 pageidx += size;
186 }
187
188 return ret;
189}
190
191/*
192 * Similar to traverse_pages, but use each page as a "block" of
193 * data to be processed as one unit.
194 */
195static int traverse_pages_block(unsigned nelem, size_t size,
196 struct list_head *pos,
197 int (*fn)(void *data, int nr, void *state),
198 void *state)
199{
200 void *pagedata;
201 int ret = 0;
202
203 BUG_ON(size > PAGE_SIZE);
204
205 while (nelem) {
206 int nr = (PAGE_SIZE/size);
207 struct page *page;
208 if (nr > nelem)
209 nr = nelem;
210 pos = pos->next;
211 page = list_entry(pos, struct page, lru);
212 pagedata = page_address(page);
213 ret = (*fn)(pagedata, nr, state);
214 if (ret)
215 break;
216 nelem -= nr;
217 }
218
219 return ret;
220}
221
222struct mmap_gfn_state {
223 unsigned long va;
224 struct vm_area_struct *vma;
225 domid_t domain;
226};
227
228static int mmap_gfn_range(void *data, void *state)
229{
230 struct privcmd_mmap_entry *msg = data;
231 struct mmap_gfn_state *st = state;
232 struct vm_area_struct *vma = st->vma;
233 int rc;
234
235 /* Do not allow range to wrap the address space. */
236 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
237 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
238 return -EINVAL;
239
240 /* Range chunks must be contiguous in va space. */
241 if ((msg->va != st->va) ||
242 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
243 return -EINVAL;
244
245 rc = xen_remap_domain_gfn_range(vma,
246 msg->va & PAGE_MASK,
247 msg->mfn, msg->npages,
248 vma->vm_page_prot,
249 st->domain, NULL);
250 if (rc < 0)
251 return rc;
252
253 st->va += msg->npages << PAGE_SHIFT;
254
255 return 0;
256}
257
258static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
259{
260 struct privcmd_data *data = file->private_data;
261 struct privcmd_mmap mmapcmd;
262 struct mm_struct *mm = current->mm;
263 struct vm_area_struct *vma;
264 int rc;
265 LIST_HEAD(pagelist);
266 struct mmap_gfn_state state;
267
268 /* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
269 if (xen_feature(XENFEAT_auto_translated_physmap))
270 return -ENOSYS;
271
272 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
273 return -EFAULT;
274
275 /* If restriction is in place, check the domid matches */
276 if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
277 return -EPERM;
278
279 rc = gather_array(&pagelist,
280 mmapcmd.num, sizeof(struct privcmd_mmap_entry),
281 mmapcmd.entry);
282
283 if (rc || list_empty(&pagelist))
284 goto out;
285
286 mmap_write_lock(mm);
287
288 {
289 struct page *page = list_first_entry(&pagelist,
290 struct page, lru);
291 struct privcmd_mmap_entry *msg = page_address(page);
292
293 vma = vma_lookup(mm, msg->va);
294 rc = -EINVAL;
295
296 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
297 goto out_up;
298 vma->vm_private_data = PRIV_VMA_LOCKED;
299 }
300
301 state.va = vma->vm_start;
302 state.vma = vma;
303 state.domain = mmapcmd.dom;
304
305 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
306 &pagelist,
307 mmap_gfn_range, &state);
308
309
310out_up:
311 mmap_write_unlock(mm);
312
313out:
314 free_page_list(&pagelist);
315
316 return rc;
317}
318
319struct mmap_batch_state {
320 domid_t domain;
321 unsigned long va;
322 struct vm_area_struct *vma;
323 int index;
324 /* A tristate:
325 * 0 for no errors
326 * 1 if at least one error has happened (and no
327 * -ENOENT errors have happened)
328 * -ENOENT if at least 1 -ENOENT has happened.
329 */
330 int global_error;
331 int version;
332
333 /* User-space gfn array to store errors in the second pass for V1. */
334 xen_pfn_t __user *user_gfn;
335 /* User-space int array to store errors in the second pass for V2. */
336 int __user *user_err;
337};
338
 339/* Note for auto-translated dom0: if the domU being created is PV, gfn is an mfn
 340 * (an address on the bus). If it is auto-translated, gfn is a pfn (the input to HAP).
 341 */
342static int mmap_batch_fn(void *data, int nr, void *state)
343{
344 xen_pfn_t *gfnp = data;
345 struct mmap_batch_state *st = state;
346 struct vm_area_struct *vma = st->vma;
347 struct page **pages = vma->vm_private_data;
348 struct page **cur_pages = NULL;
349 int ret;
350
351 if (xen_feature(XENFEAT_auto_translated_physmap))
352 cur_pages = &pages[st->index];
353
354 BUG_ON(nr < 0);
355 ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
356 (int *)gfnp, st->vma->vm_page_prot,
357 st->domain, cur_pages);
358
359 /* Adjust the global_error? */
360 if (ret != nr) {
361 if (ret == -ENOENT)
362 st->global_error = -ENOENT;
363 else {
364 /* Record that at least one error has happened. */
365 if (st->global_error == 0)
366 st->global_error = 1;
367 }
368 }
369 st->va += XEN_PAGE_SIZE * nr;
370 st->index += nr / XEN_PFN_PER_PAGE;
371
372 return 0;
373}
374
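/*
 * Second pass: report a per-frame error back to user space. V1 callers get
 * the error encoded in the top bits of the gfn they passed in (m.arr), while
 * V2 callers get the plain errno written to the separate m.err array.
 */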
375static int mmap_return_error(int err, struct mmap_batch_state *st)
376{
377 int ret;
378
379 if (st->version == 1) {
380 if (err) {
381 xen_pfn_t gfn;
382
383 ret = get_user(gfn, st->user_gfn);
384 if (ret < 0)
385 return ret;
386 /*
387 * V1 encodes the error codes in the 32bit top
388 * nibble of the gfn (with its known
389 * limitations vis-a-vis 64 bit callers).
390 */
391 gfn |= (err == -ENOENT) ?
392 PRIVCMD_MMAPBATCH_PAGED_ERROR :
393 PRIVCMD_MMAPBATCH_MFN_ERROR;
394 return __put_user(gfn, st->user_gfn++);
395 } else
396 st->user_gfn++;
397 } else { /* st->version == 2 */
398 if (err)
399 return __put_user(err, st->user_err++);
400 else
401 st->user_err++;
402 }
403
404 return 0;
405}
406
407static int mmap_return_errors(void *data, int nr, void *state)
408{
409 struct mmap_batch_state *st = state;
410 int *errs = data;
411 int i;
412 int ret;
413
414 for (i = 0; i < nr; i++) {
415 ret = mmap_return_error(errs[i], st);
416 if (ret < 0)
417 return ret;
418 }
419 return 0;
420}
421
 422/* Allocate pfns that are then mapped with gfns from the foreign domid. Update
 423 * the vma with the page info for later use.
 424 * Returns: 0 on success, otherwise -errno
 425 */
426static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
427{
428 int rc;
429 struct page **pages;
430
431 pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
432 if (pages == NULL)
433 return -ENOMEM;
434
435 rc = xen_alloc_unpopulated_pages(numpgs, pages);
436 if (rc != 0) {
437 pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
438 numpgs, rc);
439 kvfree(pages);
440 return -ENOMEM;
441 }
442 BUG_ON(vma->vm_private_data != NULL);
443 vma->vm_private_data = pages;
444
445 return 0;
446}
447
448static const struct vm_operations_struct privcmd_vm_ops;
449
450static long privcmd_ioctl_mmap_batch(
451 struct file *file, void __user *udata, int version)
452{
453 struct privcmd_data *data = file->private_data;
454 int ret;
455 struct privcmd_mmapbatch_v2 m;
456 struct mm_struct *mm = current->mm;
457 struct vm_area_struct *vma;
458 unsigned long nr_pages;
459 LIST_HEAD(pagelist);
460 struct mmap_batch_state state;
461
462 switch (version) {
463 case 1:
464 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
465 return -EFAULT;
466 /* Returns per-frame error in m.arr. */
467 m.err = NULL;
468 if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
469 return -EFAULT;
470 break;
471 case 2:
472 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
473 return -EFAULT;
474 /* Returns per-frame error code in m.err. */
475 if (!access_ok(m.err, m.num * (sizeof(*m.err))))
476 return -EFAULT;
477 break;
478 default:
479 return -EINVAL;
480 }
481
482 /* If restriction is in place, check the domid matches */
483 if (data->domid != DOMID_INVALID && data->domid != m.dom)
484 return -EPERM;
485
486 nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
487 if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
488 return -EINVAL;
489
490 ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
491
492 if (ret)
493 goto out;
494 if (list_empty(&pagelist)) {
495 ret = -EINVAL;
496 goto out;
497 }
498
499 if (version == 2) {
500 /* Zero error array now to only copy back actual errors. */
501 if (clear_user(m.err, sizeof(int) * m.num)) {
502 ret = -EFAULT;
503 goto out;
504 }
505 }
506
507 mmap_write_lock(mm);
508
509 vma = find_vma(mm, m.addr);
510 if (!vma ||
511 vma->vm_ops != &privcmd_vm_ops) {
512 ret = -EINVAL;
513 goto out_unlock;
514 }
515
516 /*
517 * Caller must either:
518 *
519 * Map the whole VMA range, which will also allocate all the
520 * pages required for the auto_translated_physmap case.
521 *
522 * Or
523 *
524 * Map unmapped holes left from a previous map attempt (e.g.,
525 * because those foreign frames were previously paged out).
526 */
527 if (vma->vm_private_data == NULL) {
528 if (m.addr != vma->vm_start ||
529 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
530 ret = -EINVAL;
531 goto out_unlock;
532 }
533 if (xen_feature(XENFEAT_auto_translated_physmap)) {
534 ret = alloc_empty_pages(vma, nr_pages);
535 if (ret < 0)
536 goto out_unlock;
537 } else
538 vma->vm_private_data = PRIV_VMA_LOCKED;
539 } else {
540 if (m.addr < vma->vm_start ||
541 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
542 ret = -EINVAL;
543 goto out_unlock;
544 }
545 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
546 ret = -EINVAL;
547 goto out_unlock;
548 }
549 }
550
551 state.domain = m.dom;
552 state.vma = vma;
553 state.va = m.addr;
554 state.index = 0;
555 state.global_error = 0;
556 state.version = version;
557
558 BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
559 /* mmap_batch_fn guarantees ret == 0 */
560 BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
561 &pagelist, mmap_batch_fn, &state));
562
563 mmap_write_unlock(mm);
564
565 if (state.global_error) {
566 /* Write back errors in second pass. */
567 state.user_gfn = (xen_pfn_t *)m.arr;
568 state.user_err = m.err;
569 ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
570 &pagelist, mmap_return_errors, &state);
571 } else
572 ret = 0;
573
574 /* If we have not had any EFAULT-like global errors then set the global
575 * error to -ENOENT if necessary. */
576 if ((ret == 0) && (state.global_error == -ENOENT))
577 ret = -ENOENT;
578
579out:
580 free_page_list(&pagelist);
581 return ret;
582
583out_unlock:
584 mmap_write_unlock(mm);
585 goto out;
586}
587
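/*
 * Pin the user pages backing each dm_op buffer. A buffer may span several
 * pages and pin_user_pages_fast() may pin fewer pages than requested, so
 * 'off' tracks how far into the current buffer we have already pinned before
 * moving on to the next one. *pinned is kept up to date so the caller can
 * release exactly the pages that were pinned, even on failure.
 */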
588static int lock_pages(
589 struct privcmd_dm_op_buf kbufs[], unsigned int num,
590 struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
591{
592 unsigned int i, off = 0;
593
594 for (i = 0; i < num; ) {
595 unsigned int requested;
596 int page_count;
597
598 requested = DIV_ROUND_UP(
599 offset_in_page(kbufs[i].uptr) + kbufs[i].size,
600 PAGE_SIZE) - off;
601 if (requested > nr_pages)
602 return -ENOSPC;
603
604 page_count = pin_user_pages_fast(
605 (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
606 requested, FOLL_WRITE, pages);
607 if (page_count <= 0)
608 return page_count ? : -EFAULT;
609
610 *pinned += page_count;
611 nr_pages -= page_count;
612 pages += page_count;
613
614 off = (requested == page_count) ? 0 : off + page_count;
615 i += !off;
616 }
617
618 return 0;
619}
620
621static void unlock_pages(struct page *pages[], unsigned int nr_pages)
622{
623 unpin_user_pages_dirty_lock(pages, nr_pages, true);
624}
625
626static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
627{
628 struct privcmd_data *data = file->private_data;
629 struct privcmd_dm_op kdata;
630 struct privcmd_dm_op_buf *kbufs;
631 unsigned int nr_pages = 0;
632 struct page **pages = NULL;
633 struct xen_dm_op_buf *xbufs = NULL;
634 unsigned int i;
635 long rc;
636 unsigned int pinned = 0;
637
638 if (copy_from_user(&kdata, udata, sizeof(kdata)))
639 return -EFAULT;
640
641 /* If restriction is in place, check the domid matches */
642 if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
643 return -EPERM;
644
645 if (kdata.num == 0)
646 return 0;
647
648 if (kdata.num > privcmd_dm_op_max_num)
649 return -E2BIG;
650
651 kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
652 if (!kbufs)
653 return -ENOMEM;
654
655 if (copy_from_user(kbufs, kdata.ubufs,
656 sizeof(*kbufs) * kdata.num)) {
657 rc = -EFAULT;
658 goto out;
659 }
660
661 for (i = 0; i < kdata.num; i++) {
662 if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
663 rc = -E2BIG;
664 goto out;
665 }
666
667 if (!access_ok(kbufs[i].uptr,
668 kbufs[i].size)) {
669 rc = -EFAULT;
670 goto out;
671 }
672
673 nr_pages += DIV_ROUND_UP(
674 offset_in_page(kbufs[i].uptr) + kbufs[i].size,
675 PAGE_SIZE);
676 }
677
678 pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
679 if (!pages) {
680 rc = -ENOMEM;
681 goto out;
682 }
683
684 xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
685 if (!xbufs) {
686 rc = -ENOMEM;
687 goto out;
688 }
689
690 rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
691 if (rc < 0)
692 goto out;
693
694 for (i = 0; i < kdata.num; i++) {
695 set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
696 xbufs[i].size = kbufs[i].size;
697 }
698
699 xen_preemptible_hcall_begin();
700 rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
701 xen_preemptible_hcall_end();
702
703out:
704 unlock_pages(pages, pinned);
705 kfree(xbufs);
706 kfree(pages);
707 kfree(kbufs);
708
709 return rc;
710}
711
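/*
 * IOCTL_PRIVCMD_RESTRICT: tie this file handle to a single domain. Once set,
 * the restriction cannot be changed; subsequent ioctls are checked against
 * it and raw hypercalls are refused altogether.
 */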
712static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
713{
714 struct privcmd_data *data = file->private_data;
715 domid_t dom;
716
717 if (copy_from_user(&dom, udata, sizeof(dom)))
718 return -EFAULT;
719
720 /* Set restriction to the specified domain, or check it matches */
721 if (data->domid == DOMID_INVALID)
722 data->domid = dom;
723 else if (data->domid != dom)
724 return -EINVAL;
725
726 return 0;
727}
728
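/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE: map the pages of a Xen resource, identified
 * by (type, id), into a previously prepared privcmd VMA. When both addr and
 * num are zero the ioctl only reports the number of frames backing the
 * resource.
 */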
729static long privcmd_ioctl_mmap_resource(struct file *file,
730 struct privcmd_mmap_resource __user *udata)
731{
732 struct privcmd_data *data = file->private_data;
733 struct mm_struct *mm = current->mm;
734 struct vm_area_struct *vma;
735 struct privcmd_mmap_resource kdata;
736 xen_pfn_t *pfns = NULL;
737 struct xen_mem_acquire_resource xdata = { };
738 int rc;
739
740 if (copy_from_user(&kdata, udata, sizeof(kdata)))
741 return -EFAULT;
742
743 /* If restriction is in place, check the domid matches */
744 if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
745 return -EPERM;
746
747 /* Both fields must be set or unset */
748 if (!!kdata.addr != !!kdata.num)
749 return -EINVAL;
750
751 xdata.domid = kdata.dom;
752 xdata.type = kdata.type;
753 xdata.id = kdata.id;
754
755 if (!kdata.addr && !kdata.num) {
756 /* Query the size of the resource. */
757 rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
758 if (rc)
759 return rc;
760 return __put_user(xdata.nr_frames, &udata->num);
761 }
762
763 mmap_write_lock(mm);
764
765 vma = find_vma(mm, kdata.addr);
766 if (!vma || vma->vm_ops != &privcmd_vm_ops) {
767 rc = -EINVAL;
768 goto out;
769 }
770
771 pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
772 if (!pfns) {
773 rc = -ENOMEM;
774 goto out;
775 }
776
777 if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
778 xen_feature(XENFEAT_auto_translated_physmap)) {
779 unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
780 struct page **pages;
781 unsigned int i;
782
783 rc = alloc_empty_pages(vma, nr);
784 if (rc < 0)
785 goto out;
786
787 pages = vma->vm_private_data;
788
789 for (i = 0; i < kdata.num; i++) {
790 xen_pfn_t pfn =
791 page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
792
793 pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
794 }
795 } else
796 vma->vm_private_data = PRIV_VMA_LOCKED;
797
798 xdata.frame = kdata.idx;
799 xdata.nr_frames = kdata.num;
800 set_xen_guest_handle(xdata.frame_list, pfns);
801
802 xen_preemptible_hcall_begin();
803 rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
804 xen_preemptible_hcall_end();
805
806 if (rc)
807 goto out;
808
809 if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
810 xen_feature(XENFEAT_auto_translated_physmap)) {
811 rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
812 } else {
813 unsigned int domid =
814 (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
815 DOMID_SELF : kdata.dom;
816 int num, *errs = (int *)pfns;
817
818 BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
819 num = xen_remap_domain_mfn_array(vma,
820 kdata.addr & PAGE_MASK,
821 pfns, kdata.num, errs,
822 vma->vm_page_prot,
823 domid);
824 if (num < 0)
825 rc = num;
826 else if (num != kdata.num) {
827 unsigned int i;
828
829 for (i = 0; i < num; i++) {
830 rc = errs[i];
831 if (rc < 0)
832 break;
833 }
834 } else
835 rc = 0;
836 }
837
838out:
839 mmap_write_unlock(mm);
840 kfree(pfns);
841
842 return rc;
843}
844
845#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
846/* Irqfd support */
847static struct workqueue_struct *irqfd_cleanup_wq;
848static DEFINE_MUTEX(irqfds_lock);
849static LIST_HEAD(irqfds_list);
850
851struct privcmd_kernel_irqfd {
852 struct xen_dm_op_buf xbufs;
853 domid_t dom;
854 bool error;
855 struct eventfd_ctx *eventfd;
856 struct work_struct shutdown;
857 wait_queue_entry_t wait;
858 struct list_head list;
859 poll_table pt;
860};
861
862static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
863{
864 lockdep_assert_held(&irqfds_lock);
865
866 list_del_init(&kirqfd->list);
867 queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
868}
869
870static void irqfd_shutdown(struct work_struct *work)
871{
872 struct privcmd_kernel_irqfd *kirqfd =
873 container_of(work, struct privcmd_kernel_irqfd, shutdown);
874 u64 cnt;
875
876 eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
877 eventfd_ctx_put(kirqfd->eventfd);
878 kfree(kirqfd);
879}
880
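/*
 * Consume the pending eventfd count and issue the dm_op that was supplied at
 * assign time, typically to inject an interrupt into the guest.
 */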
881static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
882{
883 u64 cnt;
884 long rc;
885
886 eventfd_ctx_do_read(kirqfd->eventfd, &cnt);
887
888 xen_preemptible_hcall_begin();
889 rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
890 xen_preemptible_hcall_end();
891
892 /* Don't repeat the error message for consecutive failures */
893 if (rc && !kirqfd->error) {
894 pr_err("Failed to configure irq for guest domain: %d\n",
895 kirqfd->dom);
896 }
897
898 kirqfd->error = rc;
899}
900
901static int
902irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
903{
904 struct privcmd_kernel_irqfd *kirqfd =
905 container_of(wait, struct privcmd_kernel_irqfd, wait);
906 __poll_t flags = key_to_poll(key);
907
908 if (flags & EPOLLIN)
909 irqfd_inject(kirqfd);
910
911 if (flags & EPOLLHUP) {
912 mutex_lock(&irqfds_lock);
913 irqfd_deactivate(kirqfd);
914 mutex_unlock(&irqfds_lock);
915 }
916
917 return 0;
918}
919
920static void
921irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
922{
923 struct privcmd_kernel_irqfd *kirqfd =
924 container_of(pt, struct privcmd_kernel_irqfd, pt);
925
926 add_wait_queue_priority(wqh, &kirqfd->wait);
927}
928
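/*
 * Register an eventfd so that signalling it triggers the supplied dm_op for
 * the guest, without a round trip through the user space device model. Each
 * eventfd may only be assigned once.
 */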
929static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
930{
931 struct privcmd_kernel_irqfd *kirqfd, *tmp;
932 __poll_t events;
933 struct fd f;
934 void *dm_op;
935 int ret;
936
937 kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
938 if (!kirqfd)
939 return -ENOMEM;
940 dm_op = kirqfd + 1;
941
942 if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
943 ret = -EFAULT;
944 goto error_kfree;
945 }
946
947 kirqfd->xbufs.size = irqfd->size;
948 set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
949 kirqfd->dom = irqfd->dom;
950 INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);
951
952 f = fdget(irqfd->fd);
953 if (!f.file) {
954 ret = -EBADF;
955 goto error_kfree;
956 }
957
958 kirqfd->eventfd = eventfd_ctx_fileget(f.file);
959 if (IS_ERR(kirqfd->eventfd)) {
960 ret = PTR_ERR(kirqfd->eventfd);
961 goto error_fd_put;
962 }
963
964 /*
965 * Install our own custom wake-up handling so we are notified via a
966 * callback whenever someone signals the underlying eventfd.
967 */
968 init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
969 init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
970
971 mutex_lock(&irqfds_lock);
972
973 list_for_each_entry(tmp, &irqfds_list, list) {
974 if (kirqfd->eventfd == tmp->eventfd) {
975 ret = -EBUSY;
976 mutex_unlock(&irqfds_lock);
977 goto error_eventfd;
978 }
979 }
980
981 list_add_tail(&kirqfd->list, &irqfds_list);
982 mutex_unlock(&irqfds_lock);
983
984 /*
985 * Check if there was an event already pending on the eventfd before we
986 * registered, and trigger it as if we didn't miss it.
987 */
988 events = vfs_poll(f.file, &kirqfd->pt);
989 if (events & EPOLLIN)
990 irqfd_inject(kirqfd);
991
992 /*
993 * Do not drop the file until the kirqfd is fully initialized, otherwise
994 * we might race against the EPOLLHUP.
995 */
996 fdput(f);
997 return 0;
998
999error_eventfd:
1000 eventfd_ctx_put(kirqfd->eventfd);
1001
1002error_fd_put:
1003 fdput(f);
1004
1005error_kfree:
1006 kfree(kirqfd);
1007 return ret;
1008}
1009
1010static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
1011{
1012 struct privcmd_kernel_irqfd *kirqfd;
1013 struct eventfd_ctx *eventfd;
1014
1015 eventfd = eventfd_ctx_fdget(irqfd->fd);
1016 if (IS_ERR(eventfd))
1017 return PTR_ERR(eventfd);
1018
1019 mutex_lock(&irqfds_lock);
1020
1021 list_for_each_entry(kirqfd, &irqfds_list, list) {
1022 if (kirqfd->eventfd == eventfd) {
1023 irqfd_deactivate(kirqfd);
1024 break;
1025 }
1026 }
1027
1028 mutex_unlock(&irqfds_lock);
1029
1030 eventfd_ctx_put(eventfd);
1031
1032 /*
1033 * Block until we know all outstanding shutdown jobs have completed so
1034 * that we guarantee there will not be any more interrupts once this
1035 * deassign function returns.
1036 */
1037 flush_workqueue(irqfd_cleanup_wq);
1038
1039 return 0;
1040}
1041
1042static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
1043{
1044 struct privcmd_data *data = file->private_data;
1045 struct privcmd_irqfd irqfd;
1046
1047 if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
1048 return -EFAULT;
1049
1050 /* No other flags should be set */
1051 if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
1052 return -EINVAL;
1053
1054 /* If restriction is in place, check the domid matches */
1055 if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
1056 return -EPERM;
1057
1058 if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
1059 return privcmd_irqfd_deassign(&irqfd);
1060
1061 return privcmd_irqfd_assign(&irqfd);
1062}
1063
1064static int privcmd_irqfd_init(void)
1065{
1066 irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
1067 if (!irqfd_cleanup_wq)
1068 return -ENOMEM;
1069
1070 return 0;
1071}
1072
1073static void privcmd_irqfd_exit(void)
1074{
1075 struct privcmd_kernel_irqfd *kirqfd, *tmp;
1076
1077 mutex_lock(&irqfds_lock);
1078
1079 list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
1080 irqfd_deactivate(kirqfd);
1081
1082 mutex_unlock(&irqfds_lock);
1083
1084 destroy_workqueue(irqfd_cleanup_wq);
1085}
1086
1087/* Ioeventfd Support */
1088#define QUEUE_NOTIFY_VQ_MASK 0xFFFF
1089
1090static DEFINE_MUTEX(ioreq_lock);
1091static LIST_HEAD(ioreq_list);
1092
1093/* per-eventfd structure */
1094struct privcmd_kernel_ioeventfd {
1095 struct eventfd_ctx *eventfd;
1096 struct list_head list;
1097 u64 addr;
1098 unsigned int addr_len;
1099 unsigned int vq;
1100};
1101
1102/* per-guest CPU / port structure */
1103struct ioreq_port {
1104 int vcpu;
1105 unsigned int port;
1106 struct privcmd_kernel_ioreq *kioreq;
1107};
1108
1109/* per-guest structure */
1110struct privcmd_kernel_ioreq {
1111 domid_t dom;
1112 unsigned int vcpus;
1113 u64 uioreq;
1114 struct ioreq *ioreq;
1115 spinlock_t lock; /* Protects ioeventfds list */
1116 struct list_head ioeventfds;
1117 struct list_head list;
1118 struct ioreq_port ports[] __counted_by(vcpus);
1119};
1120
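/*
 * Event channel handler: the ioreq left by Xen describes an MMIO write by
 * the guest. If it hits the queue-notify register of a registered ioeventfd
 * (matching address, access width and virtqueue index), signal the eventfd
 * and complete the ioreq here, without waking the user space device model.
 */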
1121static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
1122{
1123 struct ioreq_port *port = dev_id;
1124 struct privcmd_kernel_ioreq *kioreq = port->kioreq;
1125 struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
1126 struct privcmd_kernel_ioeventfd *kioeventfd;
1127 unsigned int state = STATE_IOREQ_READY;
1128
1129 if (ioreq->state != STATE_IOREQ_READY ||
1130 ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
1131 return IRQ_NONE;
1132
 1133 /*
 1134 * We need a barrier, smp_mb(), here to ensure reads are finished before
 1135 * `state` is updated. Since the lock implementation ensures that an
 1136 * appropriate barrier is added anyway, we can avoid adding an explicit
 1137 * barrier here.
 1138 *
 1139 * Ideally we would not need to update `state` while holding the lock, but
 1140 * we do so here to avoid an explicit barrier.
 1141 */
1142
1143 spin_lock(&kioreq->lock);
1144 ioreq->state = STATE_IOREQ_INPROCESS;
1145
1146 list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
1147 if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
1148 ioreq->size == kioeventfd->addr_len &&
1149 (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
1150 eventfd_signal(kioeventfd->eventfd);
1151 state = STATE_IORESP_READY;
1152 break;
1153 }
1154 }
1155 spin_unlock(&kioreq->lock);
1156
 1157 /*
 1158 * We need a barrier, smp_mb(), here to ensure writes are finished
 1159 * before `state` is updated. Since the lock implementation ensures that
 1160 * an appropriate barrier is added anyway, we can avoid adding an
 1161 * explicit barrier here.
 1162 */
1163
1164 ioreq->state = state;
1165
1166 if (state == STATE_IORESP_READY) {
1167 notify_remote_via_evtchn(port->port);
1168 return IRQ_HANDLED;
1169 }
1170
1171 return IRQ_NONE;
1172}
1173
1174static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
1175{
1176 struct ioreq_port *ports = kioreq->ports;
1177 int i;
1178
1179 lockdep_assert_held(&ioreq_lock);
1180
1181 list_del(&kioreq->list);
1182
1183 for (i = kioreq->vcpus - 1; i >= 0; i--)
1184 unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);
1185
1186 kfree(kioreq);
1187}
1188
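/*
 * Allocate the per-guest ioreq state: look up the ioreq page that user space
 * previously mapped through privcmd, copy in the per-vCPU event channel
 * ports and bind an interrupt handler to each of them.
 */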
1189static
1190struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
1191{
1192 struct privcmd_kernel_ioreq *kioreq;
1193 struct mm_struct *mm = current->mm;
1194 struct vm_area_struct *vma;
1195 struct page **pages;
1196 unsigned int *ports;
1197 int ret, size, i;
1198
1199 lockdep_assert_held(&ioreq_lock);
1200
1201 size = struct_size(kioreq, ports, ioeventfd->vcpus);
1202 kioreq = kzalloc(size, GFP_KERNEL);
1203 if (!kioreq)
1204 return ERR_PTR(-ENOMEM);
1205
1206 kioreq->dom = ioeventfd->dom;
1207 kioreq->vcpus = ioeventfd->vcpus;
1208 kioreq->uioreq = ioeventfd->ioreq;
1209 spin_lock_init(&kioreq->lock);
1210 INIT_LIST_HEAD(&kioreq->ioeventfds);
1211
 1212 /* The memory for the ioreq server must have been mapped earlier */
1213 mmap_write_lock(mm);
1214 vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
1215 if (!vma) {
1216 pr_err("Failed to find vma for ioreq page!\n");
1217 mmap_write_unlock(mm);
1218 ret = -EFAULT;
1219 goto error_kfree;
1220 }
1221
1222 pages = vma->vm_private_data;
1223 kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
1224 mmap_write_unlock(mm);
1225
1226 ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
1227 kioreq->vcpus, sizeof(*ports));
1228 if (IS_ERR(ports)) {
1229 ret = PTR_ERR(ports);
1230 goto error_kfree;
1231 }
1232
1233 for (i = 0; i < kioreq->vcpus; i++) {
1234 kioreq->ports[i].vcpu = i;
1235 kioreq->ports[i].port = ports[i];
1236 kioreq->ports[i].kioreq = kioreq;
1237
1238 ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
1239 ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
1240 &kioreq->ports[i]);
1241 if (ret < 0)
1242 goto error_unbind;
1243 }
1244
1245 kfree(ports);
1246
1247 list_add_tail(&kioreq->list, &ioreq_list);
1248
1249 return kioreq;
1250
1251error_unbind:
1252 while (--i >= 0)
1253 unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
1254
1255 kfree(ports);
1256error_kfree:
1257 kfree(kioreq);
1258 return ERR_PTR(ret);
1259}
1260
1261static struct privcmd_kernel_ioreq *
1262get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
1263{
1264 struct privcmd_kernel_ioreq *kioreq;
1265 unsigned long flags;
1266
1267 list_for_each_entry(kioreq, &ioreq_list, list) {
1268 struct privcmd_kernel_ioeventfd *kioeventfd;
1269
1270 /*
1271 * kioreq fields can be accessed here without a lock as they are
1272 * never updated after being added to the ioreq_list.
1273 */
1274 if (kioreq->uioreq != ioeventfd->ioreq) {
1275 continue;
1276 } else if (kioreq->dom != ioeventfd->dom ||
1277 kioreq->vcpus != ioeventfd->vcpus) {
 1278 pr_err("Mismatched ioeventfd configuration, dom (%u vs %u), vcpus (%u vs %u)\n",
1279 kioreq->dom, ioeventfd->dom, kioreq->vcpus,
1280 ioeventfd->vcpus);
1281 return ERR_PTR(-EINVAL);
1282 }
1283
1284 /* Look for a duplicate eventfd for the same guest */
1285 spin_lock_irqsave(&kioreq->lock, flags);
1286 list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
1287 if (eventfd == kioeventfd->eventfd) {
1288 spin_unlock_irqrestore(&kioreq->lock, flags);
1289 return ERR_PTR(-EBUSY);
1290 }
1291 }
1292 spin_unlock_irqrestore(&kioreq->lock, flags);
1293
1294 return kioreq;
1295 }
1296
 1297 /* No matching kioreq was found; allocate a new one */
1298 return alloc_ioreq(ioeventfd);
1299}
1300
1301static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
1302{
1303 list_del(&kioeventfd->list);
1304 eventfd_ctx_put(kioeventfd->eventfd);
1305 kfree(kioeventfd);
1306}
1307
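/*
 * Register an eventfd for a (dom, ioreq page, vcpus) tuple: guest writes of
 * addr_len bytes to the queue-notify register at addr + VIRTIO_MMIO_QUEUE_NOTIFY
 * that select virtqueue 'vq' will then signal the eventfd directly from
 * ioeventfd_interrupt().
 */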
1308static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
1309{
1310 struct privcmd_kernel_ioeventfd *kioeventfd;
1311 struct privcmd_kernel_ioreq *kioreq;
1312 unsigned long flags;
1313 struct fd f;
1314 int ret;
1315
1316 /* Check for range overflow */
1317 if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
1318 return -EINVAL;
1319
1320 /* Vhost requires us to support length 1, 2, 4, and 8 */
1321 if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
1322 ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
1323 return -EINVAL;
1324
 1325 /* Is a limit of 4096 vcpus enough? */
1326 if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
1327 return -EINVAL;
1328
1329 kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
1330 if (!kioeventfd)
1331 return -ENOMEM;
1332
1333 f = fdget(ioeventfd->event_fd);
1334 if (!f.file) {
1335 ret = -EBADF;
1336 goto error_kfree;
1337 }
1338
1339 kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
1340 fdput(f);
1341
1342 if (IS_ERR(kioeventfd->eventfd)) {
1343 ret = PTR_ERR(kioeventfd->eventfd);
1344 goto error_kfree;
1345 }
1346
1347 kioeventfd->addr = ioeventfd->addr;
1348 kioeventfd->addr_len = ioeventfd->addr_len;
1349 kioeventfd->vq = ioeventfd->vq;
1350
1351 mutex_lock(&ioreq_lock);
1352 kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
1353 if (IS_ERR(kioreq)) {
1354 mutex_unlock(&ioreq_lock);
1355 ret = PTR_ERR(kioreq);
1356 goto error_eventfd;
1357 }
1358
1359 spin_lock_irqsave(&kioreq->lock, flags);
1360 list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
1361 spin_unlock_irqrestore(&kioreq->lock, flags);
1362
1363 mutex_unlock(&ioreq_lock);
1364
1365 return 0;
1366
1367error_eventfd:
1368 eventfd_ctx_put(kioeventfd->eventfd);
1369
1370error_kfree:
1371 kfree(kioeventfd);
1372 return ret;
1373}
1374
1375static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
1376{
1377 struct privcmd_kernel_ioreq *kioreq, *tkioreq;
1378 struct eventfd_ctx *eventfd;
1379 unsigned long flags;
1380 int ret = 0;
1381
1382 eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
1383 if (IS_ERR(eventfd))
1384 return PTR_ERR(eventfd);
1385
1386 mutex_lock(&ioreq_lock);
1387 list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
1388 struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
1389 /*
1390 * kioreq fields can be accessed here without a lock as they are
1391 * never updated after being added to the ioreq_list.
1392 */
1393 if (kioreq->dom != ioeventfd->dom ||
1394 kioreq->uioreq != ioeventfd->ioreq ||
1395 kioreq->vcpus != ioeventfd->vcpus)
1396 continue;
1397
1398 spin_lock_irqsave(&kioreq->lock, flags);
1399 list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
1400 if (eventfd == kioeventfd->eventfd) {
1401 ioeventfd_free(kioeventfd);
1402 spin_unlock_irqrestore(&kioreq->lock, flags);
1403
1404 if (list_empty(&kioreq->ioeventfds))
1405 ioreq_free(kioreq);
1406 goto unlock;
1407 }
1408 }
1409 spin_unlock_irqrestore(&kioreq->lock, flags);
1410 break;
1411 }
1412
 1413 pr_err("Ioeventfd is not assigned, dom: %u, addr: %llu\n",
1414 ioeventfd->dom, ioeventfd->addr);
1415 ret = -ENODEV;
1416
1417unlock:
1418 mutex_unlock(&ioreq_lock);
1419 eventfd_ctx_put(eventfd);
1420
1421 return ret;
1422}
1423
1424static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
1425{
1426 struct privcmd_data *data = file->private_data;
1427 struct privcmd_ioeventfd ioeventfd;
1428
1429 if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
1430 return -EFAULT;
1431
1432 /* No other flags should be set */
1433 if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
1434 return -EINVAL;
1435
1436 /* If restriction is in place, check the domid matches */
1437 if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
1438 return -EPERM;
1439
1440 if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
1441 return privcmd_ioeventfd_deassign(&ioeventfd);
1442
1443 return privcmd_ioeventfd_assign(&ioeventfd);
1444}
1445
1446static void privcmd_ioeventfd_exit(void)
1447{
1448 struct privcmd_kernel_ioreq *kioreq, *tmp;
1449 unsigned long flags;
1450
1451 mutex_lock(&ioreq_lock);
1452 list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
1453 struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
1454
1455 spin_lock_irqsave(&kioreq->lock, flags);
1456 list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
1457 ioeventfd_free(kioeventfd);
1458 spin_unlock_irqrestore(&kioreq->lock, flags);
1459
1460 ioreq_free(kioreq);
1461 }
1462 mutex_unlock(&ioreq_lock);
1463}
1464#else
1465static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
1466{
1467 return -EOPNOTSUPP;
1468}
1469
1470static inline int privcmd_irqfd_init(void)
1471{
1472 return 0;
1473}
1474
1475static inline void privcmd_irqfd_exit(void)
1476{
1477}
1478
1479static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
1480{
1481 return -EOPNOTSUPP;
1482}
1483
1484static inline void privcmd_ioeventfd_exit(void)
1485{
1486}
1487#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */
1488
1489static long privcmd_ioctl(struct file *file,
1490 unsigned int cmd, unsigned long data)
1491{
1492 int ret = -ENOTTY;
1493 void __user *udata = (void __user *) data;
1494
1495 switch (cmd) {
1496 case IOCTL_PRIVCMD_HYPERCALL:
1497 ret = privcmd_ioctl_hypercall(file, udata);
1498 break;
1499
1500 case IOCTL_PRIVCMD_MMAP:
1501 ret = privcmd_ioctl_mmap(file, udata);
1502 break;
1503
1504 case IOCTL_PRIVCMD_MMAPBATCH:
1505 ret = privcmd_ioctl_mmap_batch(file, udata, 1);
1506 break;
1507
1508 case IOCTL_PRIVCMD_MMAPBATCH_V2:
1509 ret = privcmd_ioctl_mmap_batch(file, udata, 2);
1510 break;
1511
1512 case IOCTL_PRIVCMD_DM_OP:
1513 ret = privcmd_ioctl_dm_op(file, udata);
1514 break;
1515
1516 case IOCTL_PRIVCMD_RESTRICT:
1517 ret = privcmd_ioctl_restrict(file, udata);
1518 break;
1519
1520 case IOCTL_PRIVCMD_MMAP_RESOURCE:
1521 ret = privcmd_ioctl_mmap_resource(file, udata);
1522 break;
1523
1524 case IOCTL_PRIVCMD_IRQFD:
1525 ret = privcmd_ioctl_irqfd(file, udata);
1526 break;
1527
1528 case IOCTL_PRIVCMD_IOEVENTFD:
1529 ret = privcmd_ioctl_ioeventfd(file, udata);
1530 break;
1531
1532 default:
1533 break;
1534 }
1535
1536 return ret;
1537}
1538
1539static int privcmd_open(struct inode *ino, struct file *file)
1540{
1541 struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
1542
1543 if (!data)
1544 return -ENOMEM;
1545
1546 /* DOMID_INVALID implies no restriction */
1547 data->domid = DOMID_INVALID;
1548
1549 file->private_data = data;
1550 return 0;
1551}
1552
1553static int privcmd_release(struct inode *ino, struct file *file)
1554{
1555 struct privcmd_data *data = file->private_data;
1556
1557 kfree(data);
1558 return 0;
1559}
1560
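/*
 * VMA close handler: for auto-translated domains, unmap any remaining
 * foreign frames and return the backing pages allocated by
 * alloc_empty_pages() via xen_free_unpopulated_pages().
 */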
1561static void privcmd_close(struct vm_area_struct *vma)
1562{
1563 struct page **pages = vma->vm_private_data;
1564 int numpgs = vma_pages(vma);
1565 int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
1566 int rc;
1567
1568 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
1569 return;
1570
1571 rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
1572 if (rc == 0)
1573 xen_free_unpopulated_pages(numpgs, pages);
1574 else
1575 pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
1576 numpgs, rc);
1577 kvfree(pages);
1578}
1579
1580static vm_fault_t privcmd_fault(struct vm_fault *vmf)
1581{
1582 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
1583 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
1584 vmf->pgoff, (void *)vmf->address);
1585
1586 return VM_FAULT_SIGBUS;
1587}
1588
1589static const struct vm_operations_struct privcmd_vm_ops = {
1590 .close = privcmd_close,
1591 .fault = privcmd_fault
1592};
1593
1594static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
1595{
1596 /* DONTCOPY is essential for Xen because copy_page_range doesn't know
1597 * how to recreate these mappings */
1598 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
1599 VM_DONTEXPAND | VM_DONTDUMP);
1600 vma->vm_ops = &privcmd_vm_ops;
1601 vma->vm_private_data = NULL;
1602
1603 return 0;
1604}
1605
1606/*
 1607 * For MMAPBATCH*. This allows enforcing the single-shot mapping
 1608 * on a per-pfn/pte basis. Mapping calls that fail with ENOENT
 1609 * can then be retried until they succeed.
1610 */
1611static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
1612{
1613 return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
1614}
1615
1616static int privcmd_vma_range_is_mapped(
1617 struct vm_area_struct *vma,
1618 unsigned long addr,
1619 unsigned long nr_pages)
1620{
1621 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
1622 is_mapped_fn, NULL) != 0;
1623}
1624
1625const struct file_operations xen_privcmd_fops = {
1626 .owner = THIS_MODULE,
1627 .unlocked_ioctl = privcmd_ioctl,
1628 .open = privcmd_open,
1629 .release = privcmd_release,
1630 .mmap = privcmd_mmap,
1631};
1632EXPORT_SYMBOL_GPL(xen_privcmd_fops);
1633
1634static struct miscdevice privcmd_dev = {
1635 .minor = MISC_DYNAMIC_MINOR,
1636 .name = "xen/privcmd",
1637 .fops = &xen_privcmd_fops,
1638};
1639
1640static int __init privcmd_init(void)
1641{
1642 int err;
1643
1644 if (!xen_domain())
1645 return -ENODEV;
1646
1647 err = misc_register(&privcmd_dev);
1648 if (err != 0) {
1649 pr_err("Could not register Xen privcmd device\n");
1650 return err;
1651 }
1652
1653 err = misc_register(&xen_privcmdbuf_dev);
1654 if (err != 0) {
1655 pr_err("Could not register Xen hypercall-buf device\n");
1656 goto err_privcmdbuf;
1657 }
1658
1659 err = privcmd_irqfd_init();
1660 if (err != 0) {
1661 pr_err("irqfd init failed\n");
1662 goto err_irqfd;
1663 }
1664
1665 return 0;
1666
1667err_irqfd:
1668 misc_deregister(&xen_privcmdbuf_dev);
1669err_privcmdbuf:
1670 misc_deregister(&privcmd_dev);
1671 return err;
1672}
1673
1674static void __exit privcmd_exit(void)
1675{
1676 privcmd_ioeventfd_exit();
1677 privcmd_irqfd_exit();
1678 misc_deregister(&privcmd_dev);
1679 misc_deregister(&xen_privcmdbuf_dev);
1680}
1681
1682module_init(privcmd_init);
1683module_exit(privcmd_exit);
1/******************************************************************************
2 * privcmd.c
3 *
4 * Interface to privileged domain-0 commands.
5 *
6 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
7 */
8
9#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16#include <linux/errno.h>
17#include <linux/mm.h>
18#include <linux/mman.h>
19#include <linux/uaccess.h>
20#include <linux/swap.h>
21#include <linux/highmem.h>
22#include <linux/pagemap.h>
23#include <linux/seq_file.h>
24#include <linux/miscdevice.h>
25#include <linux/moduleparam.h>
26
27#include <asm/pgalloc.h>
28#include <asm/pgtable.h>
29#include <asm/tlb.h>
30#include <asm/xen/hypervisor.h>
31#include <asm/xen/hypercall.h>
32
33#include <xen/xen.h>
34#include <xen/privcmd.h>
35#include <xen/interface/xen.h>
36#include <xen/interface/hvm/dm_op.h>
37#include <xen/features.h>
38#include <xen/page.h>
39#include <xen/xen-ops.h>
40#include <xen/balloon.h>
41
42#include "privcmd.h"
43
44MODULE_LICENSE("GPL");
45
46#define PRIV_VMA_LOCKED ((void *)1)
47
48static unsigned int privcmd_dm_op_max_num = 16;
49module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
50MODULE_PARM_DESC(dm_op_max_nr_bufs,
51 "Maximum number of buffers per dm_op hypercall");
52
53static unsigned int privcmd_dm_op_buf_max_size = 4096;
54module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
55 0644);
56MODULE_PARM_DESC(dm_op_buf_max_size,
57 "Maximum size of a dm_op hypercall buffer");
58
59struct privcmd_data {
60 domid_t domid;
61};
62
63static int privcmd_vma_range_is_mapped(
64 struct vm_area_struct *vma,
65 unsigned long addr,
66 unsigned long nr_pages);
67
68static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
69{
70 struct privcmd_data *data = file->private_data;
71 struct privcmd_hypercall hypercall;
72 long ret;
73
74 /* Disallow arbitrary hypercalls if restricted */
75 if (data->domid != DOMID_INVALID)
76 return -EPERM;
77
78 if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
79 return -EFAULT;
80
81 xen_preemptible_hcall_begin();
82 ret = privcmd_call(hypercall.op,
83 hypercall.arg[0], hypercall.arg[1],
84 hypercall.arg[2], hypercall.arg[3],
85 hypercall.arg[4]);
86 xen_preemptible_hcall_end();
87
88 return ret;
89}
90
91static void free_page_list(struct list_head *pages)
92{
93 struct page *p, *n;
94
95 list_for_each_entry_safe(p, n, pages, lru)
96 __free_page(p);
97
98 INIT_LIST_HEAD(pages);
99}
100
101/*
102 * Given an array of items in userspace, return a list of pages
103 * containing the data. If copying fails, either because of memory
104 * allocation failure or a problem reading user memory, return an
105 * error code; its up to the caller to dispose of any partial list.
106 */
107static int gather_array(struct list_head *pagelist,
108 unsigned nelem, size_t size,
109 const void __user *data)
110{
111 unsigned pageidx;
112 void *pagedata;
113 int ret;
114
115 if (size > PAGE_SIZE)
116 return 0;
117
118 pageidx = PAGE_SIZE;
119 pagedata = NULL; /* quiet, gcc */
120 while (nelem--) {
121 if (pageidx > PAGE_SIZE-size) {
122 struct page *page = alloc_page(GFP_KERNEL);
123
124 ret = -ENOMEM;
125 if (page == NULL)
126 goto fail;
127
128 pagedata = page_address(page);
129
130 list_add_tail(&page->lru, pagelist);
131 pageidx = 0;
132 }
133
134 ret = -EFAULT;
135 if (copy_from_user(pagedata + pageidx, data, size))
136 goto fail;
137
138 data += size;
139 pageidx += size;
140 }
141
142 ret = 0;
143
144fail:
145 return ret;
146}
147
148/*
149 * Call function "fn" on each element of the array fragmented
150 * over a list of pages.
151 */
152static int traverse_pages(unsigned nelem, size_t size,
153 struct list_head *pos,
154 int (*fn)(void *data, void *state),
155 void *state)
156{
157 void *pagedata;
158 unsigned pageidx;
159 int ret = 0;
160
161 BUG_ON(size > PAGE_SIZE);
162
163 pageidx = PAGE_SIZE;
164 pagedata = NULL; /* hush, gcc */
165
166 while (nelem--) {
167 if (pageidx > PAGE_SIZE-size) {
168 struct page *page;
169 pos = pos->next;
170 page = list_entry(pos, struct page, lru);
171 pagedata = page_address(page);
172 pageidx = 0;
173 }
174
175 ret = (*fn)(pagedata + pageidx, state);
176 if (ret)
177 break;
178 pageidx += size;
179 }
180
181 return ret;
182}
183
184/*
185 * Similar to traverse_pages, but use each page as a "block" of
186 * data to be processed as one unit.
187 */
188static int traverse_pages_block(unsigned nelem, size_t size,
189 struct list_head *pos,
190 int (*fn)(void *data, int nr, void *state),
191 void *state)
192{
193 void *pagedata;
194 int ret = 0;
195
196 BUG_ON(size > PAGE_SIZE);
197
198 while (nelem) {
199 int nr = (PAGE_SIZE/size);
200 struct page *page;
201 if (nr > nelem)
202 nr = nelem;
203 pos = pos->next;
204 page = list_entry(pos, struct page, lru);
205 pagedata = page_address(page);
206 ret = (*fn)(pagedata, nr, state);
207 if (ret)
208 break;
209 nelem -= nr;
210 }
211
212 return ret;
213}
214
215struct mmap_gfn_state {
216 unsigned long va;
217 struct vm_area_struct *vma;
218 domid_t domain;
219};
220
221static int mmap_gfn_range(void *data, void *state)
222{
223 struct privcmd_mmap_entry *msg = data;
224 struct mmap_gfn_state *st = state;
225 struct vm_area_struct *vma = st->vma;
226 int rc;
227
228 /* Do not allow range to wrap the address space. */
229 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
230 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
231 return -EINVAL;
232
233 /* Range chunks must be contiguous in va space. */
234 if ((msg->va != st->va) ||
235 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
236 return -EINVAL;
237
238 rc = xen_remap_domain_gfn_range(vma,
239 msg->va & PAGE_MASK,
240 msg->mfn, msg->npages,
241 vma->vm_page_prot,
242 st->domain, NULL);
243 if (rc < 0)
244 return rc;
245
246 st->va += msg->npages << PAGE_SHIFT;
247
248 return 0;
249}
250
251static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
252{
253 struct privcmd_data *data = file->private_data;
254 struct privcmd_mmap mmapcmd;
255 struct mm_struct *mm = current->mm;
256 struct vm_area_struct *vma;
257 int rc;
258 LIST_HEAD(pagelist);
259 struct mmap_gfn_state state;
260
261 /* We only support privcmd_ioctl_mmap_batch for auto translated. */
262 if (xen_feature(XENFEAT_auto_translated_physmap))
263 return -ENOSYS;
264
265 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
266 return -EFAULT;
267
268 /* If restriction is in place, check the domid matches */
269 if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
270 return -EPERM;
271
272 rc = gather_array(&pagelist,
273 mmapcmd.num, sizeof(struct privcmd_mmap_entry),
274 mmapcmd.entry);
275
276 if (rc || list_empty(&pagelist))
277 goto out;
278
279 down_write(&mm->mmap_sem);
280
281 {
282 struct page *page = list_first_entry(&pagelist,
283 struct page, lru);
284 struct privcmd_mmap_entry *msg = page_address(page);
285
286 vma = find_vma(mm, msg->va);
287 rc = -EINVAL;
288
289 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
290 goto out_up;
291 vma->vm_private_data = PRIV_VMA_LOCKED;
292 }
293
294 state.va = vma->vm_start;
295 state.vma = vma;
296 state.domain = mmapcmd.dom;
297
298 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
299 &pagelist,
300 mmap_gfn_range, &state);
301
302
303out_up:
304 up_write(&mm->mmap_sem);
305
306out:
307 free_page_list(&pagelist);
308
309 return rc;
310}
311
312struct mmap_batch_state {
313 domid_t domain;
314 unsigned long va;
315 struct vm_area_struct *vma;
316 int index;
317 /* A tristate:
318 * 0 for no errors
319 * 1 if at least one error has happened (and no
320 * -ENOENT errors have happened)
321 * -ENOENT if at least 1 -ENOENT has happened.
322 */
323 int global_error;
324 int version;
325
326 /* User-space gfn array to store errors in the second pass for V1. */
327 xen_pfn_t __user *user_gfn;
328 /* User-space int array to store errors in the second pass for V2. */
329 int __user *user_err;
330};
331
332/* auto translated dom0 note: if domU being created is PV, then gfn is
333 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
334 */
335static int mmap_batch_fn(void *data, int nr, void *state)
336{
337 xen_pfn_t *gfnp = data;
338 struct mmap_batch_state *st = state;
339 struct vm_area_struct *vma = st->vma;
340 struct page **pages = vma->vm_private_data;
341 struct page **cur_pages = NULL;
342 int ret;
343
344 if (xen_feature(XENFEAT_auto_translated_physmap))
345 cur_pages = &pages[st->index];
346
347 BUG_ON(nr < 0);
348 ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
349 (int *)gfnp, st->vma->vm_page_prot,
350 st->domain, cur_pages);
351
352 /* Adjust the global_error? */
353 if (ret != nr) {
354 if (ret == -ENOENT)
355 st->global_error = -ENOENT;
356 else {
357 /* Record that at least one error has happened. */
358 if (st->global_error == 0)
359 st->global_error = 1;
360 }
361 }
362 st->va += XEN_PAGE_SIZE * nr;
363 st->index += nr / XEN_PFN_PER_PAGE;
364
365 return 0;
366}
367
368static int mmap_return_error(int err, struct mmap_batch_state *st)
369{
370 int ret;
371
372 if (st->version == 1) {
373 if (err) {
374 xen_pfn_t gfn;
375
376 ret = get_user(gfn, st->user_gfn);
377 if (ret < 0)
378 return ret;
379 /*
380 * V1 encodes the error codes in the 32bit top
381 * nibble of the gfn (with its known
382 * limitations vis-a-vis 64 bit callers).
383 */
384 gfn |= (err == -ENOENT) ?
385 PRIVCMD_MMAPBATCH_PAGED_ERROR :
386 PRIVCMD_MMAPBATCH_MFN_ERROR;
387 return __put_user(gfn, st->user_gfn++);
388 } else
389 st->user_gfn++;
390 } else { /* st->version == 2 */
391 if (err)
392 return __put_user(err, st->user_err++);
393 else
394 st->user_err++;
395 }
396
397 return 0;
398}
399
400static int mmap_return_errors(void *data, int nr, void *state)
401{
402 struct mmap_batch_state *st = state;
403 int *errs = data;
404 int i;
405 int ret;
406
407 for (i = 0; i < nr; i++) {
408 ret = mmap_return_error(errs[i], st);
409 if (ret < 0)
410 return ret;
411 }
412 return 0;
413}
414
415/* Allocate pfns that are then mapped with gfns from foreign domid. Update
416 * the vma with the page info to use later.
417 * Returns: 0 if success, otherwise -errno
418 */
419static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
420{
421 int rc;
422 struct page **pages;
423
424 pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
425 if (pages == NULL)
426 return -ENOMEM;
427
428 rc = alloc_xenballooned_pages(numpgs, pages);
429 if (rc != 0) {
430 pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
431 numpgs, rc);
432 kfree(pages);
433 return -ENOMEM;
434 }
435 BUG_ON(vma->vm_private_data != NULL);
436 vma->vm_private_data = pages;
437
438 return 0;
439}
440
441static const struct vm_operations_struct privcmd_vm_ops;
442
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.version = version;

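	/*
	 * Each page-sized block of the gathered gfn array must describe a
	 * whole number of kernel pages' worth of Xen frames, so that
	 * mmap_batch_fn never has to split a page across two blocks.
	 */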
	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}

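/*
 * Pin the user buffers described by kbufs[] so they stay resident while the
 * (preemptible) dm_op hypercall accesses them directly.  pages[] must have
 * room for nr_pages entries; the pinned pages are recorded there so that
 * unlock_pages() can drop the references afterwards.
 */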
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int pinned;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		pinned = get_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;
		if (pinned < requested) {
			/*
			 * Partial pin: treat as a fault.  The pages pinned so
			 * far are released by unlock_pages(), which skips the
			 * still-NULL tail of the zero-initialised array.
			 */
			return -EFAULT;
		}

		nr_pages -= pinned;
		pages += pinned;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	if (!pages)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			put_page(pages[i]);
	}
}

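/*
 * Handler for IOCTL_PRIVCMD_DM_OP: forward a device-model operation to the
 * hypervisor.  The user-supplied buffer descriptors are checked against the
 * module limits (privcmd_dm_op_max_num, privcmd_dm_op_buf_max_size), their
 * backing pages are pinned for the duration of the hypercall, and the
 * buffers are passed to Xen as guest handles.
 */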
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
	if (rc)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

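/*
 * Handler for IOCTL_PRIVCMD_RESTRICT: restrict this file handle to a single
 * domid.  Once set, the restriction cannot be changed; later calls only
 * succeed if they name the same domain.  Illustrative user-space call
 * (privcmd_fd and foreign_domid are placeholders):
 *
 *	domid_t domid = foreign_domid;
 *	ioctl(privcmd_fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 */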
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

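/* Top-level ioctl dispatcher for the /dev/xen/privcmd device node. */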
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

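/*
 * Each open of the privcmd device gets its own privcmd_data, so an
 * IOCTL_PRIVCMD_RESTRICT restriction applies per file handle rather than
 * globally.
 */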
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

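/*
 * VMA close hook: for auto-translated guests, unmap the foreign frames and
 * return the backing ballooned pages that alloc_empty_pages() set up.
 */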
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

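/*
 * Mappings in a privcmd VMA are only ever populated by the ioctls above, so
 * a fault here means the caller touched an address that was never mapped
 * (or whose mapping failed); report SIGBUS rather than trying to populate
 * the page.
 */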
static int privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the single-shot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until they succeed.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	struct vm_area_struct *vma,
	unsigned long addr,
	unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);