/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
        xen_preemptible_hcall_end();

        return ret;
}
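
/*
 * Illustrative sketch (not part of this driver): a privileged userspace
 * tool would typically open the misc device registered below and issue
 * IOCTL_PRIVCMD_HYPERCALL with a filled-in struct privcmd_hypercall.
 * The hypercall number and argument values here are placeholders, not a
 * real call:
 *
 *      int fd = open("/dev/xen/privcmd", O_RDWR);
 *      struct privcmd_hypercall call = {
 *              .op  = 0,                       // hypervisor op number (placeholder)
 *              .arg = { 0, 0, 0, 0, 0 },       // up to five arguments
 *      };
 *      long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */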

static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}
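
/*
 * Typical call pattern (as used by the mmap ioctls below): gather_array()
 * copies the user array into a list of kernel pages, traverse_pages() /
 * traverse_pages_block() walk that list invoking a callback per element
 * (or per page worth of elements), and free_page_list() releases the
 * pages.  Callers free the list even when gather_array() fails, since a
 * partial list may already have been built.
 */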

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
                                struct list_head *pos,
                                int (*fn)(void *data, int nr, void *state),
                                void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;

        while (nelem) {
                int nr = (PAGE_SIZE/size);
                struct page *page;
                if (nr > nelem)
                        nr = nelem;
                pos = pos->next;
                page = list_entry(pos, struct page, lru);
                pagedata = page_address(page);
                ret = (*fn)(pagedata, nr, state);
                if (ret)
                        break;
                nelem -= nr;
        }

        return ret;
}
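
/*
 * Note: traverse_pages_block() hands the callback up to PAGE_SIZE/size
 * elements at a time, all taken from a single page of the gathered list.
 * The batch ioctl below relies on that grouping being a whole multiple of
 * XEN_PFN_PER_PAGE (see the BUILD_BUG_ON in privcmd_ioctl_mmap_batch), so
 * a group of Xen frames never straddles two list pages.
 */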

struct mmap_gfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_gfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_gfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}

static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
                        goto out_up;
                vma->vm_private_data = PRIV_VMA_LOCKED;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_gfn_range, &state);


out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}

struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space gfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_gfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* auto translated dom0 note: if the domU being created is PV, then the gfn
 * is an mfn (an address on the bus).  If it's auto-translated, then the gfn
 * is a pfn (the input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
        xen_pfn_t *gfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page **cur_pages = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_pages = &pages[st->index];

        BUG_ON(nr < 0);
        ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
                                         (int *)gfnp, st->vma->vm_page_prot,
                                         st->domain, cur_pages);

        /* Adjust the global_error? */
        if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += PAGE_SIZE * nr;
        st->index += nr;

        return 0;
}
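
/*
 * mmap_batch_fn() reports per-frame status in place: the (int *)gfnp cast
 * lets xen_remap_domain_gfn_array() write an error code for each frame back
 * over the gfn array it was given, and the second pass (mmap_return_errors()
 * below) later reads those codes out again.  The callback itself always
 * returns 0 so that the traversal visits every frame; failures are only
 * summarised in st->global_error.
 */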

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
        int ret;

        if (st->version == 1) {
                if (err) {
                        xen_pfn_t gfn;

                        ret = get_user(gfn, st->user_gfn);
                        if (ret < 0)
                                return ret;
                        /*
                         * V1 encodes the error codes in the 32bit top
                         * nibble of the gfn (with its known
                         * limitations vis-a-vis 64 bit callers).
                         */
                        gfn |= (err == -ENOENT) ?
                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                        return __put_user(gfn, st->user_gfn++);
                } else
                        st->user_gfn++;
        } else { /* st->version == 2 */
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}
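
/*
 * Example of the V1 encoding handled above (illustrative value): a gfn of
 * 0x1234 whose mapping failed with -ENOENT is reported back to userspace as
 * 0x1234 | PRIVCMD_MMAPBATCH_PAGED_ERROR, i.e. the error is OR'd into the
 * top bits of the frame number, which is why large 64-bit frame values can
 * clash with the encoding (the "known limitations" noted above).  V2
 * callers instead get a plain errno in the separate m.err array.
 */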

static int mmap_return_errors(void *data, int nr, void *state)
{
        struct mmap_batch_state *st = state;
        int *errs = data;
        int i;
        int ret;

        for (i = 0; i < nr; i++) {
                ret = mmap_return_error(errs[i], st);
                if (ret < 0)
                        return ret;
        }
        return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;

        return 0;
}
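
/*
 * The ballooned pages allocated here back the VMA in the
 * auto_translated_physmap case; they are torn down again in
 * privcmd_close() via xen_unmap_domain_gfn_range() and
 * free_xenballooned_pages().
 */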

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Caller must either:
         *
         * Map the whole VMA range, which will also allocate all the
         * pages required for the auto_translated_physmap case.
         *
         * Or
         *
         * Map unmapped holes left from a previous map attempt (e.g.,
         * because those foreign frames were previously paged out).
         */
        if (vma->vm_private_data == NULL) {
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, nr_pages);
                        if (ret < 0)
                                goto out_unlock;
                } else
                        vma->vm_private_data = PRIV_VMA_LOCKED;
        } else {
                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.index = 0;
        state.global_error = 0;
        state.version = version;

        BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                    &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_gfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                           &pagelist, mmap_return_errors, &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);
        return ret;

out_unlock:
        up_write(&mm->mmap_sem);
        goto out;
}
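
/*
 * Illustrative sketch of the V2 batch interface from userspace (values are
 * placeholders, not a working toolstack snippet): the caller mmap()s a
 * window on the privcmd fd, fills a struct privcmd_mmapbatch_v2 describing
 * the foreign frames, and may retry frames reported as -ENOENT:
 *
 *      void *addr = mmap(NULL, nr * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *      struct privcmd_mmapbatch_v2 batch = {
 *              .num  = nr,
 *              .dom  = domid,              // foreign domain (placeholder)
 *              .addr = (unsigned long)addr,
 *              .arr  = gfn_array,          // frames to map (placeholder)
 *              .err  = err_array,          // per-frame errno written back
 *      };
 *      rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch);
 */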

static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(udata, 2);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
        int rc;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
        if (rc == 0)
                free_xenballooned_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
        kfree(pages);
}

static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}
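
/*
 * There is no demand-fault path for privcmd VMAs: any access to an address
 * that was not populated through the mmap ioctls above lands in
 * privcmd_fault() and raises SIGBUS.
 */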

static const struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
                        unsigned long addr, void *data)
{
        return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages)
{
        return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;
}
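
/*
 * is_mapped_fn() returns -EBUSY for any present PTE, so
 * privcmd_vma_range_is_mapped() reports true as soon as a single page in
 * the range is already mapped.  The MMAPBATCH retry path therefore only
 * accepts ranges that are still entirely unmapped (e.g. holes left by
 * frames that were paged out on a previous attempt).
 */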

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);