/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/* The default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);

/* The default (0) resets per-vcpu halt_poll_ns rather than shrinking it. */
static unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);

/*
 * Ordering of locks:
 *
 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
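
/*
 * For instance, a hypothetical path that needed all three locks would have
 * to take them in the order above and release them in reverse (illustrative
 * sketch only; no function in this file takes all three at once):
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	mutex_lock(&kvm->irq_lock);
 *	...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 */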

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(kvm_pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
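
/*
 * Typical usage (illustrative sketch of a hypothetical caller, not a
 * function in this file): every access to vcpu state is bracketed by
 * vcpu_load()/vcpu_put(), and the killable-lock failure is propagated:
 *
 *	static int example_vcpu_op(struct kvm_vcpu *vcpu)
 *	{
 *		int r = vcpu_load(vcpu);
 *
 *		if (r)
 *			return r;	// interrupted by a fatal signal
 *		// ... operate on vcpu state on this physical CPU ...
 *		vcpu_put(vcpu);
 *		return 0;
 *	}
 */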

static void ack_flush(void *_completed)
{
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode. */
		smp_mb__after_atomic();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_swait_queue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	kvm_arch_mmu_notifier_invalidate_page(kvm, address);

	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		return NULL;

	/*
	 * Init kvm generation close to the maximum to easily test the
	 * code that handles generation-number wrap-around.
	 */
	slots->generation = -150;
	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	atomic_inc(&current->mm->mm_count);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm->memslots[i] = kvm_alloc_memslots();
		if (!kvm->memslots[i])
			goto out_err_no_srcu;
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}
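
/*
 * The resulting layout, with n = kvm_dirty_bitmap_bytes(memslot), is two
 * back-to-back bitmaps; the second half serves as a snapshot buffer, as
 * used by kvm_get_dirty_log_protect() below:
 *
 *	unsigned long *live     = memslot->dirty_bitmap;
 *	unsigned long *snapshot = memslot->dirty_bitmap + n / sizeof(long);
 */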

/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so that binary search can be used to look up a GFN.
 * The sort takes advantage of the initially sorted array and
 * the known position of the changed memslot.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong. So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}
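
/*
 * Worked example (hypothetical GFNs): with slots sorted by base_gfn in
 * descending order, say { 0x300, 0x200, 0x100 }, moving the middle slot
 * to base_gfn 0x400 runs only the second loop: the 0x300 entry is shifted
 * over the stale slot and the updated slot lands at the front, giving
 * { 0x400, 0x300, 0x100 }, with id_to_index updated at each shift.
 */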

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);

	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 */
	slots->generation++;

	kvm_arch_memslots_updated(kvm, slots);

	return old_memslots;
}
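
/*
 * Generation parity, traced on a hypothetical update: if the old
 * generation is 100, the new slots are published with generation 101
 * (odd, so SPTE caching stays disabled while readers may still see either
 * copy), and after synchronize_srcu_expedited() it is bumped to 102
 * (even, caching allowed again):
 *
 *	100 (old, even) -> 101 (update in flight, odd) -> 102 (new, even)
 */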

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	int as_id, id;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	new = old = *slot;

	new.id = id;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else {
		if (!old.npages)
			goto out;

		change = KVM_MR_DELETE;
		new.base_gfn = 0;
		new.flags = 0;
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == id))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		goto out_free;
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag. This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		return r;
	}

	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_memslot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty, write protect them for the next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * Keep in mind that VCPU threads can write to the bitmap concurrently.
 * So, to avoid losing track of dirty pages we keep the following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to userspace.
 *    4. Upon return, the caller flushes TLBs if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before, and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
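
/*
 * Illustrative sketch of the translation chain (hypothetical caller, not
 * part of this file): a gpa is split into a gfn plus a page offset, the
 * gfn is resolved to a host virtual address, and the error cookie must be
 * checked before the hva is used:
 *
 *	gfn_t gfn = gpa >> PAGE_SHIFT;
 *	unsigned long hva = gfn_to_hva(kvm, gfn);
 *
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 *	hva += offset_in_page(gpa);
 *	// ... access guest memory via copy_{from,to}_user() on hva ...
 */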

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

/*
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int get_user_page_nowait(unsigned long start, int write,
		struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(current, current->mm, start, 1, flags, page,
			NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The atomic path to get the writable pfn, which will be stored in @pfn.
 * Returns true on success; otherwise false.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	if (!(async || atomic))
		return false;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows mapping a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address.
 * Returns 1 on success, or -errno if an error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the function must not sleep
 * @async: whether this function needs to wait for IO to complete if the
 *         host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	kvm_pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_reserved_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

1444
1445kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
1446 bool atomic, bool *async, bool write_fault,
1447 bool *writable)
1448{
1449 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
1450
1451 if (addr == KVM_HVA_ERR_RO_BAD) {
1452 if (writable)
1453 *writable = false;
1454 return KVM_PFN_ERR_RO_FAULT;
1455 }
1456
1457 if (kvm_is_error_hva(addr)) {
1458 if (writable)
1459 *writable = false;
1460 return KVM_PFN_NOSLOT;
1461 }
1462
1463 /* Do not map writable pfn in the readonly memslot. */
1464 if (writable && memslot_is_readonly(slot)) {
1465 *writable = false;
1466 writable = NULL;
1467 }
1468
1469 return hva_to_pfn(addr, atomic, async, write_fault,
1470 writable);
1471}
1472EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
1473
1474kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1475 bool *writable)
1476{
1477 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
1478 write_fault, writable);
1479}
1480EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1481
1482kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1483{
1484 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
1485}
1486EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
1487
1488kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
1489{
1490 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
1491}
1492EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
1493
1494kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1495{
1496 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
1497}
1498EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1499
1500kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
1501{
1502 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1503}
1504EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
1505
1506kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1507{
1508 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
1509}
1510EXPORT_SYMBOL_GPL(gfn_to_pfn);
1511
1512kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1513{
1514 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1515}
1516EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
1517
1518int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1519 struct page **pages, int nr_pages)
1520{
1521 unsigned long addr;
1522 gfn_t entry;
1523
1524 addr = gfn_to_hva_many(slot, gfn, &entry);
1525 if (kvm_is_error_hva(addr))
1526 return -1;
1527
1528 if (entry < nr_pages)
1529 return 0;
1530
1531 return __get_user_pages_fast(addr, nr_pages, 1, pages);
1532}
1533EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1534
1535static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
1536{
1537 if (is_error_noslot_pfn(pfn))
1538 return KVM_ERR_PTR_BAD_PAGE;
1539
1540 if (kvm_is_reserved_pfn(pfn)) {
1541 WARN_ON(1);
1542 return KVM_ERR_PTR_BAD_PAGE;
1543 }
1544
1545 return pfn_to_page(pfn);
1546}
1547
1548struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1549{
1550 kvm_pfn_t pfn;
1551
1552 pfn = gfn_to_pfn(kvm, gfn);
1553
1554 return kvm_pfn_to_page(pfn);
1555}
1556EXPORT_SYMBOL_GPL(gfn_to_page);
1557
1558struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1559{
1560 kvm_pfn_t pfn;
1561
1562 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
1563
1564 return kvm_pfn_to_page(pfn);
1565}
1566EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
1567
1568void kvm_release_page_clean(struct page *page)
1569{
1570 WARN_ON(is_error_page(page));
1571
1572 kvm_release_pfn_clean(page_to_pfn(page));
1573}
1574EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1575
1576void kvm_release_pfn_clean(kvm_pfn_t pfn)
1577{
1578 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
1579 put_page(pfn_to_page(pfn));
1580}
1581EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1582
1583void kvm_release_page_dirty(struct page *page)
1584{
1585 WARN_ON(is_error_page(page));
1586
1587 kvm_release_pfn_dirty(page_to_pfn(page));
1588}
1589EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1590
1591static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
1592{
1593 kvm_set_pfn_dirty(pfn);
1594 kvm_release_pfn_clean(pfn);
1595}
1596
1597void kvm_set_pfn_dirty(kvm_pfn_t pfn)
1598{
1599 if (!kvm_is_reserved_pfn(pfn)) {
1600 struct page *page = pfn_to_page(pfn);
1601
1602 if (!PageReserved(page))
1603 SetPageDirty(page);
1604 }
1605}
1606EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1607
1608void kvm_set_pfn_accessed(kvm_pfn_t pfn)
1609{
1610 if (!kvm_is_reserved_pfn(pfn))
1611 mark_page_accessed(pfn_to_page(pfn));
1612}
1613EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1614
1615void kvm_get_pfn(kvm_pfn_t pfn)
1616{
1617 if (!kvm_is_reserved_pfn(pfn))
1618 get_page(pfn_to_page(pfn));
1619}
1620EXPORT_SYMBOL_GPL(kvm_get_pfn);
1621
1622static int next_segment(unsigned long len, int offset)
1623{
1624 if (len > PAGE_SIZE - offset)
1625 return PAGE_SIZE - offset;
1626 else
1627 return len;
1628}
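
/*
 * Worked example (assuming 4 KiB pages): copying len = 6000 bytes starting
 * at offset = 3000 within the first page yields segments of
 * next_segment(6000, 3000) = 1096 bytes (up to the page boundary), then
 * next_segment(4904, 0) = 4096 bytes (one whole page), and finally
 * next_segment(808, 0) = 808 bytes.
 */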

static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
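
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * reading a guest-physical structure into a kernel buffer, with the
 * page-crossing segmentation handled internally by kvm_read_guest():
 *
 *	struct example_hdr hdr;		// hypothetical type
 *
 *	if (kvm_read_guest(kvm, gpa, &hdr, sizeof(hdr)))
 *		return -EFAULT;
 */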

int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);

static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
				   void *data, int offset, unsigned long len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
				  const void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(memslot, gfn);
	return 0;
}

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
		ghc->hva += offset;
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
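
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * initialize the cache once for a fixed guest address, then use the cached
 * translation on the hot path; the cache revalidates itself when the
 * memslot generation has changed.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;			// hypothetical payload
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;
 *	// ... later, possibly many times ...
 *	if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 */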

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
				    gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	unsigned int old, val, grow;

	old = val = vcpu->halt_poll_ns;
	grow = READ_ONCE(halt_poll_ns_grow);
	/* 10us base */
	if (val == 0 && grow)
		val = 10000;
	else
		val *= grow;

	if (val > halt_poll_ns)
		val = halt_poll_ns;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	unsigned int old, val, shrink;

	old = val = vcpu->halt_poll_ns;
	shrink = READ_ONCE(halt_poll_ns_shrink);
	if (shrink == 0)
		val = 0;
	else
		val /= shrink;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}
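
/*
 * Numeric example with the default parameters (halt_poll_ns_grow = 2,
 * halt_poll_ns_shrink = 0): a vcpu's poll window grows
 * 0 -> 10000 -> 20000 -> ... ns across short halts, capped at the global
 * halt_poll_ns, and a single long block resets it straight back to 0,
 * because a shrink divisor of 0 means "reset" rather than "divide".
 */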
1994
1995static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
1996{
1997 if (kvm_arch_vcpu_runnable(vcpu)) {
1998 kvm_make_request(KVM_REQ_UNHALT, vcpu);
1999 return -EINTR;
2000 }
2001 if (kvm_cpu_has_pending_timer(vcpu))
2002 return -EINTR;
2003 if (signal_pending(current))
2004 return -EINTR;
2005
2006 return 0;
2007}
2008
2009/*
2010 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
2011 */
2012void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2013{
2014 ktime_t start, cur;
2015 DECLARE_SWAITQUEUE(wait);
2016 bool waited = false;
2017 u64 block_ns;
2018
2019 start = cur = ktime_get();
2020 if (vcpu->halt_poll_ns) {
2021 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2022
2023 ++vcpu->stat.halt_attempted_poll;
2024 do {
2025 /*
2026 * This sets KVM_REQ_UNHALT if an interrupt
2027 * arrives.
2028 */
2029 if (kvm_vcpu_check_block(vcpu) < 0) {
2030 ++vcpu->stat.halt_successful_poll;
2031 goto out;
2032 }
2033 cur = ktime_get();
2034 } while (single_task_running() && ktime_before(cur, stop));
2035 }
2036
2037 kvm_arch_vcpu_blocking(vcpu);
2038
2039 for (;;) {
2040 prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
2041
2042 if (kvm_vcpu_check_block(vcpu) < 0)
2043 break;
2044
2045 waited = true;
2046 schedule();
2047 }
2048
2049 finish_swait(&vcpu->wq, &wait);
2050 cur = ktime_get();
2051
2052 kvm_arch_vcpu_unblocking(vcpu);
2053out:
2054 block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
2055
2056 if (halt_poll_ns) {
2057 if (block_ns <= vcpu->halt_poll_ns)
2058 ;
2059 /* we had a long block, shrink polling */
2060 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
2061 shrink_halt_poll_ns(vcpu);
2062 /* we had a short halt and our poll time is too small */
2063 else if (vcpu->halt_poll_ns < halt_poll_ns &&
2064 block_ns < halt_poll_ns)
2065 grow_halt_poll_ns(vcpu);
2066 } else
2067 vcpu->halt_poll_ns = 0;
2068
2069 trace_kvm_vcpu_wakeup(block_ns, waited);
2070}
2071EXPORT_SYMBOL_GPL(kvm_vcpu_block);
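
/*
 * Worked example of the adjustment policy above (illustrative numbers;
 * assume halt_poll_ns = 500000 and the module defaults
 * halt_poll_ns_grow = 2, halt_poll_ns_shrink = 0):
 *
 *	- a vcpu that keeps halting for short periods grows its window
 *	  0 -> 10000 -> 20000 -> 40000 -> ... until capped at 500000;
 *	- one long block (block_ns > halt_poll_ns) with shrink == 0
 *	  resets the window straight back to 0, so a genuinely idle
 *	  vcpu stops burning CPU in the poll loop.
 */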
2072
2073#ifndef CONFIG_S390
2074/*
2075 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
2076 */
2077void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
2078{
2079 int me;
2080 int cpu = vcpu->cpu;
2081 struct swait_queue_head *wqp;
2082
2083 wqp = kvm_arch_vcpu_wq(vcpu);
2084 if (swait_active(wqp)) {
2085 swake_up(wqp);
2086 ++vcpu->stat.halt_wakeup;
2087 }
2088
2089 me = get_cpu();
2090 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
2091 if (kvm_arch_vcpu_should_kick(vcpu))
2092 smp_send_reschedule(cpu);
2093 put_cpu();
2094}
2095EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
2096#endif /* !CONFIG_S390 */
2097
2098int kvm_vcpu_yield_to(struct kvm_vcpu *target)
2099{
2100 struct pid *pid;
2101 struct task_struct *task = NULL;
2102 int ret = 0;
2103
2104 rcu_read_lock();
2105 pid = rcu_dereference(target->pid);
2106 if (pid)
2107 task = get_pid_task(pid, PIDTYPE_PID);
2108 rcu_read_unlock();
2109 if (!task)
2110 return ret;
2111 ret = yield_to(task, 1);
2112 put_task_struct(task);
2113
2114 return ret;
2115}
2116EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
2117
2118/*
2119 * Helper that checks whether a VCPU is eligible for directed yield.
2120 * The most eligible candidate to yield to is decided by the following
2121 * heuristics:
2122 *
2123 * (a) A VCPU which has not done a PLE exit or had CPU relax intercepted
2124 * recently (a preempted lock holder), indicated by @in_spin_loop, set at
2125 * the beginning and cleared at the end of the interception/PLE handler.
2126 *
2127 * (b) A VCPU which has done a PLE exit/CPU relax intercept but did not get
2128 * a chance last time (it has most likely become eligible now, since we
2129 * probably yielded to the lock holder in the last iteration; this is done
2130 * by toggling @dy_eligible each time a VCPU is checked for eligibility).
2131 *
2132 * Yielding to a recently PLE-exited/CPU-relax-intercepted VCPU before
2133 * yielding to the preempted lock holder could result in wrong VCPU
2134 * selection and CPU burning; giving priority to a potential lock holder
2135 * improves lock progress.
2136 *
2137 * Since the algorithm is based on heuristics, accessing another VCPU's
2138 * data without locking does no harm: at worst we yield to the same VCPU and fail.
2139 */
2140static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2141{
2142#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2143 bool eligible;
2144
2145 eligible = !vcpu->spin_loop.in_spin_loop ||
2146 vcpu->spin_loop.dy_eligible;
2147
2148 if (vcpu->spin_loop.in_spin_loop)
2149 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
2150
2151 return eligible;
2152#else
2153 return true;
2154#endif
2155}
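
/*
 * Illustrative sequence for the toggle above: while in_spin_loop is
 * set, repeated eligibility checks alternate
 *
 *	check #1: dy_eligible == false -> not eligible, flipped to true
 *	check #2: dy_eligible == true  -> eligible,     flipped to false
 *
 * so a spinning vcpu is skipped at most every other directed-yield
 * scan and cannot be passed over indefinitely.
 */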
2156
2157void kvm_vcpu_on_spin(struct kvm_vcpu *me)
2158{
2159 struct kvm *kvm = me->kvm;
2160 struct kvm_vcpu *vcpu;
2161 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
2162 int yielded = 0;
2163 int try = 3;
2164 int pass;
2165 int i;
2166
2167 kvm_vcpu_set_in_spin_loop(me, true);
2168 /*
2169 * We boost the priority of a VCPU that is runnable but not
2170 * currently running, because it got preempted by something
2171 * else and called schedule in __vcpu_run. Hopefully that
2172 * VCPU is holding the lock that we need and will release it.
2173 * We approximate round-robin by starting at the last boosted VCPU.
2174 */
2175 for (pass = 0; pass < 2 && !yielded && try; pass++) {
2176 kvm_for_each_vcpu(i, vcpu, kvm) {
2177 if (!pass && i <= last_boosted_vcpu) {
2178 i = last_boosted_vcpu;
2179 continue;
2180 } else if (pass && i > last_boosted_vcpu)
2181 break;
2182 if (!ACCESS_ONCE(vcpu->preempted))
2183 continue;
2184 if (vcpu == me)
2185 continue;
2186 if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
2187 continue;
2188 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
2189 continue;
2190
2191 yielded = kvm_vcpu_yield_to(vcpu);
2192 if (yielded > 0) {
2193 kvm->last_boosted_vcpu = i;
2194 break;
2195 } else if (yielded < 0) {
2196 try--;
2197 if (!try)
2198 break;
2199 }
2200 }
2201 }
2202 kvm_vcpu_set_in_spin_loop(me, false);
2203
2204 /* Ensure vcpu is not eligible during next spinloop */
2205 kvm_vcpu_set_dy_eligible(me, false);
2206}
2207EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
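
/*
 * Example of the two-pass scan above with 4 online vcpus and
 * last_boosted_vcpu == 2: pass 0 considers only vcpu 3 (indices <= 2
 * are skipped), pass 1 wraps around and considers vcpus 0..2, so every
 * other vcpu is visited exactly once, starting just after the previous
 * boost target.
 */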
2208
2209static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2210{
2211 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
2212 struct page *page;
2213
2214 if (vmf->pgoff == 0)
2215 page = virt_to_page(vcpu->run);
2216#ifdef CONFIG_X86
2217 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
2218 page = virt_to_page(vcpu->arch.pio_data);
2219#endif
2220#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2221 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
2222 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
2223#endif
2224 else
2225 return kvm_arch_vcpu_fault(vcpu, vmf);
2226 get_page(page);
2227 vmf->page = page;
2228 return 0;
2229}
2230
2231static const struct vm_operations_struct kvm_vcpu_vm_ops = {
2232 .fault = kvm_vcpu_fault,
2233};
2234
2235static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2236{
2237 vma->vm_ops = &kvm_vcpu_vm_ops;
2238 return 0;
2239}
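
/*
 * The vcpu fd's mmap window is laid out by kvm_vcpu_fault() above:
 * page 0 is the shared struct kvm_run, followed (on x86) by the PIO
 * data page and, where configured, the coalesced MMIO ring.  A minimal
 * userspace sketch of mapping it (illustrative only, error handling
 * omitted):
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
 *	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
 *	long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu, 0);
 *
 * after which run->exit_reason is valid each time KVM_RUN returns.
 */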
2240
2241static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2242{
2243 struct kvm_vcpu *vcpu = filp->private_data;
2244
2245 kvm_put_kvm(vcpu->kvm);
2246 return 0;
2247}
2248
2249static struct file_operations kvm_vcpu_fops = {
2250 .release = kvm_vcpu_release,
2251 .unlocked_ioctl = kvm_vcpu_ioctl,
2252#ifdef CONFIG_KVM_COMPAT
2253 .compat_ioctl = kvm_vcpu_compat_ioctl,
2254#endif
2255 .mmap = kvm_vcpu_mmap,
2256 .llseek = noop_llseek,
2257};
2258
2259/*
2260 * Allocates an inode for the vcpu.
2261 */
2262static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2263{
2264 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
2265}
2266
2267/*
2268 * Creates some virtual cpus. Good luck creating more than one.
2269 */
2270static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
2271{
2272 int r;
2273 struct kvm_vcpu *vcpu;
2274
2275 if (id >= KVM_MAX_VCPUS)
2276 return -EINVAL;
2277
2278 vcpu = kvm_arch_vcpu_create(kvm, id);
2279 if (IS_ERR(vcpu))
2280 return PTR_ERR(vcpu);
2281
2282 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2283
2284 r = kvm_arch_vcpu_setup(vcpu);
2285 if (r)
2286 goto vcpu_destroy;
2287
2288 mutex_lock(&kvm->lock);
2289 if (!kvm_vcpu_compatible(vcpu)) {
2290 r = -EINVAL;
2291 goto unlock_vcpu_destroy;
2292 }
2293 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
2294 r = -EINVAL;
2295 goto unlock_vcpu_destroy;
2296 }
2297 if (kvm_get_vcpu_by_id(kvm, id)) {
2298 r = -EEXIST;
2299 goto unlock_vcpu_destroy;
2300 }
2301
2302 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
2303
2304 /* Now it's all set up, let userspace reach it */
2305 kvm_get_kvm(kvm);
2306 r = create_vcpu_fd(vcpu);
2307 if (r < 0) {
2308 kvm_put_kvm(kvm);
2309 goto unlock_vcpu_destroy;
2310 }
2311
2312 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
2313
2314 /*
2315	 * Pairs with smp_rmb() in kvm_get_vcpu(): write kvm->vcpus[]
2316	 * before the incremented value of kvm->online_vcpus is visible.
2317 */
2318 smp_wmb();
2319 atomic_inc(&kvm->online_vcpus);
2320
2321 mutex_unlock(&kvm->lock);
2322 kvm_arch_vcpu_postcreate(vcpu);
2323 return r;
2324
2325unlock_vcpu_destroy:
2326 mutex_unlock(&kvm->lock);
2327vcpu_destroy:
2328 kvm_arch_vcpu_destroy(vcpu);
2329 return r;
2330}
2331
2332static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2333{
2334 if (sigset) {
2335 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2336 vcpu->sigset_active = 1;
2337 vcpu->sigset = *sigset;
2338 } else
2339 vcpu->sigset_active = 0;
2340 return 0;
2341}
2342
2343static long kvm_vcpu_ioctl(struct file *filp,
2344 unsigned int ioctl, unsigned long arg)
2345{
2346 struct kvm_vcpu *vcpu = filp->private_data;
2347 void __user *argp = (void __user *)arg;
2348 int r;
2349 struct kvm_fpu *fpu = NULL;
2350 struct kvm_sregs *kvm_sregs = NULL;
2351
2352 if (vcpu->kvm->mm != current->mm)
2353 return -EIO;
2354
2355 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
2356 return -EINVAL;
2357
2358#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
2359 /*
2360 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
2361	 * so vcpu_load() would break them.
2362 */
2363 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
2364 return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2365#endif
2366
2367
2368 r = vcpu_load(vcpu);
2369 if (r)
2370 return r;
2371 switch (ioctl) {
2372 case KVM_RUN:
2373 r = -EINVAL;
2374 if (arg)
2375 goto out;
2376 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
2377 /* The thread running this VCPU changed. */
2378 struct pid *oldpid = vcpu->pid;
2379 struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
2380
2381 rcu_assign_pointer(vcpu->pid, newpid);
2382 if (oldpid)
2383 synchronize_rcu();
2384 put_pid(oldpid);
2385 }
2386 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
2387 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
2388 break;
2389 case KVM_GET_REGS: {
2390 struct kvm_regs *kvm_regs;
2391
2392 r = -ENOMEM;
2393 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
2394 if (!kvm_regs)
2395 goto out;
2396 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
2397 if (r)
2398 goto out_free1;
2399 r = -EFAULT;
2400 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
2401 goto out_free1;
2402 r = 0;
2403out_free1:
2404 kfree(kvm_regs);
2405 break;
2406 }
2407 case KVM_SET_REGS: {
2408 struct kvm_regs *kvm_regs;
2409
2410 r = -ENOMEM;
2411 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
2412 if (IS_ERR(kvm_regs)) {
2413 r = PTR_ERR(kvm_regs);
2414 goto out;
2415 }
2416 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
2417 kfree(kvm_regs);
2418 break;
2419 }
2420 case KVM_GET_SREGS: {
2421 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
2422 r = -ENOMEM;
2423 if (!kvm_sregs)
2424 goto out;
2425 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
2426 if (r)
2427 goto out;
2428 r = -EFAULT;
2429 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
2430 goto out;
2431 r = 0;
2432 break;
2433 }
2434 case KVM_SET_SREGS: {
2435 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
2436 if (IS_ERR(kvm_sregs)) {
2437 r = PTR_ERR(kvm_sregs);
2438 kvm_sregs = NULL;
2439 goto out;
2440 }
2441 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
2442 break;
2443 }
2444 case KVM_GET_MP_STATE: {
2445 struct kvm_mp_state mp_state;
2446
2447 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
2448 if (r)
2449 goto out;
2450 r = -EFAULT;
2451 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
2452 goto out;
2453 r = 0;
2454 break;
2455 }
2456 case KVM_SET_MP_STATE: {
2457 struct kvm_mp_state mp_state;
2458
2459 r = -EFAULT;
2460 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
2461 goto out;
2462 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
2463 break;
2464 }
2465 case KVM_TRANSLATE: {
2466 struct kvm_translation tr;
2467
2468 r = -EFAULT;
2469 if (copy_from_user(&tr, argp, sizeof(tr)))
2470 goto out;
2471 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
2472 if (r)
2473 goto out;
2474 r = -EFAULT;
2475 if (copy_to_user(argp, &tr, sizeof(tr)))
2476 goto out;
2477 r = 0;
2478 break;
2479 }
2480 case KVM_SET_GUEST_DEBUG: {
2481 struct kvm_guest_debug dbg;
2482
2483 r = -EFAULT;
2484 if (copy_from_user(&dbg, argp, sizeof(dbg)))
2485 goto out;
2486 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2487 break;
2488 }
2489 case KVM_SET_SIGNAL_MASK: {
2490 struct kvm_signal_mask __user *sigmask_arg = argp;
2491 struct kvm_signal_mask kvm_sigmask;
2492 sigset_t sigset, *p;
2493
2494 p = NULL;
2495 if (argp) {
2496 r = -EFAULT;
2497 if (copy_from_user(&kvm_sigmask, argp,
2498 sizeof(kvm_sigmask)))
2499 goto out;
2500 r = -EINVAL;
2501 if (kvm_sigmask.len != sizeof(sigset))
2502 goto out;
2503 r = -EFAULT;
2504 if (copy_from_user(&sigset, sigmask_arg->sigset,
2505 sizeof(sigset)))
2506 goto out;
2507 p = &sigset;
2508 }
2509 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2510 break;
2511 }
2512 case KVM_GET_FPU: {
2513 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2514 r = -ENOMEM;
2515 if (!fpu)
2516 goto out;
2517 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2518 if (r)
2519 goto out;
2520 r = -EFAULT;
2521 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2522 goto out;
2523 r = 0;
2524 break;
2525 }
2526 case KVM_SET_FPU: {
2527 fpu = memdup_user(argp, sizeof(*fpu));
2528 if (IS_ERR(fpu)) {
2529 r = PTR_ERR(fpu);
2530 fpu = NULL;
2531 goto out;
2532 }
2533 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2534 break;
2535 }
2536 default:
2537 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2538 }
2539out:
2540 vcpu_put(vcpu);
2541 kfree(fpu);
2542 kfree(kvm_sregs);
2543 return r;
2544}
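
/*
 * Typical userspace use of the get/set pairs handled above (x86
 * register names shown; illustrative only, error handling omitted):
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = entry;		// 'entry' is a hypothetical address
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 *
 * The kernel side always works on a private copy obtained with
 * kzalloc()/memdup_user() and copies the result back with
 * copy_to_user(), so the structure is never shared with userspace.
 */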
2545
2546#ifdef CONFIG_KVM_COMPAT
2547static long kvm_vcpu_compat_ioctl(struct file *filp,
2548 unsigned int ioctl, unsigned long arg)
2549{
2550 struct kvm_vcpu *vcpu = filp->private_data;
2551 void __user *argp = compat_ptr(arg);
2552 int r;
2553
2554 if (vcpu->kvm->mm != current->mm)
2555 return -EIO;
2556
2557 switch (ioctl) {
2558 case KVM_SET_SIGNAL_MASK: {
2559 struct kvm_signal_mask __user *sigmask_arg = argp;
2560 struct kvm_signal_mask kvm_sigmask;
2561 compat_sigset_t csigset;
2562 sigset_t sigset;
2563
2564 if (argp) {
2565 r = -EFAULT;
2566 if (copy_from_user(&kvm_sigmask, argp,
2567 sizeof(kvm_sigmask)))
2568 goto out;
2569 r = -EINVAL;
2570 if (kvm_sigmask.len != sizeof(csigset))
2571 goto out;
2572 r = -EFAULT;
2573 if (copy_from_user(&csigset, sigmask_arg->sigset,
2574 sizeof(csigset)))
2575 goto out;
2576 sigset_from_compat(&sigset, &csigset);
2577 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
2578 } else
2579 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
2580 break;
2581 }
2582 default:
2583 r = kvm_vcpu_ioctl(filp, ioctl, arg);
2584 }
2585
2586out:
2587 return r;
2588}
2589#endif
2590
2591static int kvm_device_ioctl_attr(struct kvm_device *dev,
2592 int (*accessor)(struct kvm_device *dev,
2593 struct kvm_device_attr *attr),
2594 unsigned long arg)
2595{
2596 struct kvm_device_attr attr;
2597
2598 if (!accessor)
2599 return -EPERM;
2600
2601 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2602 return -EFAULT;
2603
2604 return accessor(dev, &attr);
2605}
2606
2607static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
2608 unsigned long arg)
2609{
2610 struct kvm_device *dev = filp->private_data;
2611
2612 switch (ioctl) {
2613 case KVM_SET_DEVICE_ATTR:
2614 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
2615 case KVM_GET_DEVICE_ATTR:
2616 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
2617 case KVM_HAS_DEVICE_ATTR:
2618 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
2619 default:
2620 if (dev->ops->ioctl)
2621 return dev->ops->ioctl(dev, ioctl, arg);
2622
2623 return -ENOTTY;
2624 }
2625}
2626
2627static int kvm_device_release(struct inode *inode, struct file *filp)
2628{
2629 struct kvm_device *dev = filp->private_data;
2630 struct kvm *kvm = dev->kvm;
2631
2632 kvm_put_kvm(kvm);
2633 return 0;
2634}
2635
2636static const struct file_operations kvm_device_fops = {
2637 .unlocked_ioctl = kvm_device_ioctl,
2638#ifdef CONFIG_KVM_COMPAT
2639 .compat_ioctl = kvm_device_ioctl,
2640#endif
2641 .release = kvm_device_release,
2642};
2643
2644struct kvm_device *kvm_device_from_filp(struct file *filp)
2645{
2646 if (filp->f_op != &kvm_device_fops)
2647 return NULL;
2648
2649 return filp->private_data;
2650}
2651
2652static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
2653#ifdef CONFIG_KVM_MPIC
2654 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
2655 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
2656#endif
2657
2658#ifdef CONFIG_KVM_XICS
2659 [KVM_DEV_TYPE_XICS] = &kvm_xics_ops,
2660#endif
2661};
2662
2663int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
2664{
2665 if (type >= ARRAY_SIZE(kvm_device_ops_table))
2666 return -ENOSPC;
2667
2668 if (kvm_device_ops_table[type] != NULL)
2669 return -EEXIST;
2670
2671 kvm_device_ops_table[type] = ops;
2672 return 0;
2673}
2674
2675void kvm_unregister_device_ops(u32 type)
2676{
2677 if (kvm_device_ops_table[type] != NULL)
2678 kvm_device_ops_table[type] = NULL;
2679}
2680
2681static int kvm_ioctl_create_device(struct kvm *kvm,
2682 struct kvm_create_device *cd)
2683{
2684 struct kvm_device_ops *ops = NULL;
2685 struct kvm_device *dev;
2686 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
2687 int ret;
2688
2689 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
2690 return -ENODEV;
2691
2692 ops = kvm_device_ops_table[cd->type];
2693 if (ops == NULL)
2694 return -ENODEV;
2695
2696 if (test)
2697 return 0;
2698
2699 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2700 if (!dev)
2701 return -ENOMEM;
2702
2703 dev->ops = ops;
2704 dev->kvm = kvm;
2705
2706 ret = ops->create(dev, cd->type);
2707 if (ret < 0) {
2708 kfree(dev);
2709 return ret;
2710 }
2711
2712 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
2713 if (ret < 0) {
2714 ops->destroy(dev);
2715 return ret;
2716 }
2717
2718 list_add(&dev->vm_node, &kvm->devices);
2719 kvm_get_kvm(kvm);
2720 cd->fd = ret;
2721 return 0;
2722}
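
/*
 * Userspace sketch of the device creation path above (illustrative;
 * KVM_CREATE_DEVICE_TEST only validates the type without creating
 * anything):
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FSL_MPIC_20,	// any registered type
 *		.flags = KVM_CREATE_DEVICE_TEST,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0) {
 *		cd.flags = 0;
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *		// cd.fd now refers to the new device
 *	}
 */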
2723
2724static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
2725{
2726 switch (arg) {
2727 case KVM_CAP_USER_MEMORY:
2728 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2729 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2730 case KVM_CAP_INTERNAL_ERROR_DATA:
2731#ifdef CONFIG_HAVE_KVM_MSI
2732 case KVM_CAP_SIGNAL_MSI:
2733#endif
2734#ifdef CONFIG_HAVE_KVM_IRQFD
2735 case KVM_CAP_IRQFD:
2736 case KVM_CAP_IRQFD_RESAMPLE:
2737#endif
2738 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
2739 case KVM_CAP_CHECK_EXTENSION_VM:
2740 return 1;
2741#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2742 case KVM_CAP_IRQ_ROUTING:
2743 return KVM_MAX_IRQ_ROUTES;
2744#endif
2745#if KVM_ADDRESS_SPACE_NUM > 1
2746 case KVM_CAP_MULTI_ADDRESS_SPACE:
2747 return KVM_ADDRESS_SPACE_NUM;
2748#endif
2749 default:
2750 break;
2751 }
2752 return kvm_vm_ioctl_check_extension(kvm, arg);
2753}
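
/*
 * Capabilities handled generically above behave the same on every
 * architecture.  Illustrative userspace probe (a return value > 0
 * means the capability is present; KVM_CAP_IRQ_ROUTING reports the
 * route limit itself):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) > 0)
 *		// KVM_SET_USER_MEMORY_REGION is available
 */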
2754
2755static long kvm_vm_ioctl(struct file *filp,
2756 unsigned int ioctl, unsigned long arg)
2757{
2758 struct kvm *kvm = filp->private_data;
2759 void __user *argp = (void __user *)arg;
2760 int r;
2761
2762 if (kvm->mm != current->mm)
2763 return -EIO;
2764 switch (ioctl) {
2765 case KVM_CREATE_VCPU:
2766 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2767 break;
2768 case KVM_SET_USER_MEMORY_REGION: {
2769 struct kvm_userspace_memory_region kvm_userspace_mem;
2770
2771 r = -EFAULT;
2772 if (copy_from_user(&kvm_userspace_mem, argp,
2773 sizeof(kvm_userspace_mem)))
2774 goto out;
2775
2776 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
2777 break;
2778 }
2779 case KVM_GET_DIRTY_LOG: {
2780 struct kvm_dirty_log log;
2781
2782 r = -EFAULT;
2783 if (copy_from_user(&log, argp, sizeof(log)))
2784 goto out;
2785 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2786 break;
2787 }
2788#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2789 case KVM_REGISTER_COALESCED_MMIO: {
2790 struct kvm_coalesced_mmio_zone zone;
2791
2792 r = -EFAULT;
2793 if (copy_from_user(&zone, argp, sizeof(zone)))
2794 goto out;
2795 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
2796 break;
2797 }
2798 case KVM_UNREGISTER_COALESCED_MMIO: {
2799 struct kvm_coalesced_mmio_zone zone;
2800
2801 r = -EFAULT;
2802 if (copy_from_user(&zone, argp, sizeof(zone)))
2803 goto out;
2804 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
2805 break;
2806 }
2807#endif
2808 case KVM_IRQFD: {
2809 struct kvm_irqfd data;
2810
2811 r = -EFAULT;
2812 if (copy_from_user(&data, argp, sizeof(data)))
2813 goto out;
2814 r = kvm_irqfd(kvm, &data);
2815 break;
2816 }
2817 case KVM_IOEVENTFD: {
2818 struct kvm_ioeventfd data;
2819
2820 r = -EFAULT;
2821 if (copy_from_user(&data, argp, sizeof(data)))
2822 goto out;
2823 r = kvm_ioeventfd(kvm, &data);
2824 break;
2825 }
2826#ifdef CONFIG_HAVE_KVM_MSI
2827 case KVM_SIGNAL_MSI: {
2828 struct kvm_msi msi;
2829
2830 r = -EFAULT;
2831 if (copy_from_user(&msi, argp, sizeof(msi)))
2832 goto out;
2833 r = kvm_send_userspace_msi(kvm, &msi);
2834 break;
2835 }
2836#endif
2837#ifdef __KVM_HAVE_IRQ_LINE
2838 case KVM_IRQ_LINE_STATUS:
2839 case KVM_IRQ_LINE: {
2840 struct kvm_irq_level irq_event;
2841
2842 r = -EFAULT;
2843 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
2844 goto out;
2845
2846 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
2847 ioctl == KVM_IRQ_LINE_STATUS);
2848 if (r)
2849 goto out;
2850
2851 r = -EFAULT;
2852 if (ioctl == KVM_IRQ_LINE_STATUS) {
2853 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
2854 goto out;
2855 }
2856
2857 r = 0;
2858 break;
2859 }
2860#endif
2861#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2862 case KVM_SET_GSI_ROUTING: {
2863 struct kvm_irq_routing routing;
2864 struct kvm_irq_routing __user *urouting;
2865 struct kvm_irq_routing_entry *entries;
2866
2867 r = -EFAULT;
2868 if (copy_from_user(&routing, argp, sizeof(routing)))
2869 goto out;
2870 r = -EINVAL;
2871 if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2872 goto out;
2873 if (routing.flags)
2874 goto out;
2875 r = -ENOMEM;
2876 entries = vmalloc(routing.nr * sizeof(*entries));
2877 if (!entries)
2878 goto out;
2879 r = -EFAULT;
2880 urouting = argp;
2881 if (copy_from_user(entries, urouting->entries,
2882 routing.nr * sizeof(*entries)))
2883 goto out_free_irq_routing;
2884 r = kvm_set_irq_routing(kvm, entries, routing.nr,
2885 routing.flags);
2886out_free_irq_routing:
2887 vfree(entries);
2888 break;
2889 }
2890#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
2891 case KVM_CREATE_DEVICE: {
2892 struct kvm_create_device cd;
2893
2894 r = -EFAULT;
2895 if (copy_from_user(&cd, argp, sizeof(cd)))
2896 goto out;
2897
2898 r = kvm_ioctl_create_device(kvm, &cd);
2899 if (r)
2900 goto out;
2901
2902 r = -EFAULT;
2903 if (copy_to_user(argp, &cd, sizeof(cd)))
2904 goto out;
2905
2906 r = 0;
2907 break;
2908 }
2909 case KVM_CHECK_EXTENSION:
2910 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
2911 break;
2912 default:
2913 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2914 }
2915out:
2916 return r;
2917}
2918
2919#ifdef CONFIG_KVM_COMPAT
2920struct compat_kvm_dirty_log {
2921 __u32 slot;
2922 __u32 padding1;
2923 union {
2924 compat_uptr_t dirty_bitmap; /* one bit per page */
2925 __u64 padding2;
2926 };
2927};
2928
2929static long kvm_vm_compat_ioctl(struct file *filp,
2930 unsigned int ioctl, unsigned long arg)
2931{
2932 struct kvm *kvm = filp->private_data;
2933 int r;
2934
2935 if (kvm->mm != current->mm)
2936 return -EIO;
2937 switch (ioctl) {
2938 case KVM_GET_DIRTY_LOG: {
2939 struct compat_kvm_dirty_log compat_log;
2940 struct kvm_dirty_log log;
2941
2942 r = -EFAULT;
2943 if (copy_from_user(&compat_log, (void __user *)arg,
2944 sizeof(compat_log)))
2945 goto out;
2946 log.slot = compat_log.slot;
2947 log.padding1 = compat_log.padding1;
2948 log.padding2 = compat_log.padding2;
2949 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
2950
2951 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2952 break;
2953 }
2954 default:
2955 r = kvm_vm_ioctl(filp, ioctl, arg);
2956 }
2957
2958out:
2959 return r;
2960}
2961#endif
2962
2963static struct file_operations kvm_vm_fops = {
2964 .release = kvm_vm_release,
2965 .unlocked_ioctl = kvm_vm_ioctl,
2966#ifdef CONFIG_KVM_COMPAT
2967 .compat_ioctl = kvm_vm_compat_ioctl,
2968#endif
2969 .llseek = noop_llseek,
2970};
2971
2972static int kvm_dev_ioctl_create_vm(unsigned long type)
2973{
2974 int r;
2975 struct kvm *kvm;
2976
2977 kvm = kvm_create_vm(type);
2978 if (IS_ERR(kvm))
2979 return PTR_ERR(kvm);
2980#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2981 r = kvm_coalesced_mmio_init(kvm);
2982 if (r < 0) {
2983 kvm_put_kvm(kvm);
2984 return r;
2985 }
2986#endif
2987 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
2988 if (r < 0)
2989 kvm_put_kvm(kvm);
2990
2991 return r;
2992}
2993
2994static long kvm_dev_ioctl(struct file *filp,
2995 unsigned int ioctl, unsigned long arg)
2996{
2997 long r = -EINVAL;
2998
2999 switch (ioctl) {
3000 case KVM_GET_API_VERSION:
3001 if (arg)
3002 goto out;
3003 r = KVM_API_VERSION;
3004 break;
3005 case KVM_CREATE_VM:
3006 r = kvm_dev_ioctl_create_vm(arg);
3007 break;
3008 case KVM_CHECK_EXTENSION:
3009 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
3010 break;
3011 case KVM_GET_VCPU_MMAP_SIZE:
3012 if (arg)
3013 goto out;
3014 r = PAGE_SIZE; /* struct kvm_run */
3015#ifdef CONFIG_X86
3016 r += PAGE_SIZE; /* pio data page */
3017#endif
3018#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
3019 r += PAGE_SIZE; /* coalesced mmio ring page */
3020#endif
3021 break;
3022 case KVM_TRACE_ENABLE:
3023 case KVM_TRACE_PAUSE:
3024 case KVM_TRACE_DISABLE:
3025 r = -EOPNOTSUPP;
3026 break;
3027 default:
3028 return kvm_arch_dev_ioctl(filp, ioctl, arg);
3029 }
3030out:
3031 return r;
3032}
3033
3034static struct file_operations kvm_chardev_ops = {
3035 .unlocked_ioctl = kvm_dev_ioctl,
3036 .compat_ioctl = kvm_dev_ioctl,
3037 .llseek = noop_llseek,
3038};
3039
3040static struct miscdevice kvm_dev = {
3041 KVM_MINOR,
3042 "kvm",
3043 &kvm_chardev_ops,
3044};
3045
3046static void hardware_enable_nolock(void *junk)
3047{
3048 int cpu = raw_smp_processor_id();
3049 int r;
3050
3051 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
3052 return;
3053
3054 cpumask_set_cpu(cpu, cpus_hardware_enabled);
3055
3056 r = kvm_arch_hardware_enable();
3057
3058 if (r) {
3059 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3060 atomic_inc(&hardware_enable_failed);
3061 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
3062 }
3063}
3064
3065static void hardware_enable(void)
3066{
3067 raw_spin_lock(&kvm_count_lock);
3068 if (kvm_usage_count)
3069 hardware_enable_nolock(NULL);
3070 raw_spin_unlock(&kvm_count_lock);
3071}
3072
3073static void hardware_disable_nolock(void *junk)
3074{
3075 int cpu = raw_smp_processor_id();
3076
3077 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
3078 return;
3079 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3080 kvm_arch_hardware_disable();
3081}
3082
3083static void hardware_disable(void)
3084{
3085 raw_spin_lock(&kvm_count_lock);
3086 if (kvm_usage_count)
3087 hardware_disable_nolock(NULL);
3088 raw_spin_unlock(&kvm_count_lock);
3089}
3090
3091static void hardware_disable_all_nolock(void)
3092{
3093 BUG_ON(!kvm_usage_count);
3094
3095 kvm_usage_count--;
3096 if (!kvm_usage_count)
3097 on_each_cpu(hardware_disable_nolock, NULL, 1);
3098}
3099
3100static void hardware_disable_all(void)
3101{
3102 raw_spin_lock(&kvm_count_lock);
3103 hardware_disable_all_nolock();
3104 raw_spin_unlock(&kvm_count_lock);
3105}
3106
3107static int hardware_enable_all(void)
3108{
3109 int r = 0;
3110
3111 raw_spin_lock(&kvm_count_lock);
3112
3113 kvm_usage_count++;
3114 if (kvm_usage_count == 1) {
3115 atomic_set(&hardware_enable_failed, 0);
3116 on_each_cpu(hardware_enable_nolock, NULL, 1);
3117
3118 if (atomic_read(&hardware_enable_failed)) {
3119 hardware_disable_all_nolock();
3120 r = -EBUSY;
3121 }
3122 }
3123
3124 raw_spin_unlock(&kvm_count_lock);
3125
3126 return r;
3127}
3128
3129static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
3130 void *v)
3131{
3132 val &= ~CPU_TASKS_FROZEN;
3133 switch (val) {
3134 case CPU_DYING:
3135 hardware_disable();
3136 break;
3137 case CPU_STARTING:
3138 hardware_enable();
3139 break;
3140 }
3141 return NOTIFY_OK;
3142}
3143
3144static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3145 void *v)
3146{
3147 /*
3148	 * Some (well, at least mine) BIOSes hang on reboot if
3149	 * the CPU is still in VMX root mode.
3150	 *
3151	 * Also, Intel TXT requires VMX to be off on all CPUs at shutdown.
3152 */
3153 pr_info("kvm: exiting hardware virtualization\n");
3154 kvm_rebooting = true;
3155 on_each_cpu(hardware_disable_nolock, NULL, 1);
3156 return NOTIFY_OK;
3157}
3158
3159static struct notifier_block kvm_reboot_notifier = {
3160 .notifier_call = kvm_reboot,
3161 .priority = 0,
3162};
3163
3164static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3165{
3166 int i;
3167
3168 for (i = 0; i < bus->dev_count; i++) {
3169 struct kvm_io_device *pos = bus->range[i].dev;
3170
3171 kvm_iodevice_destructor(pos);
3172 }
3173 kfree(bus);
3174}
3175
3176static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
3177 const struct kvm_io_range *r2)
3178{
3179 gpa_t addr1 = r1->addr;
3180 gpa_t addr2 = r2->addr;
3181
3182 if (addr1 < addr2)
3183 return -1;
3184
3185 /* If r2->len == 0, match the exact address. If r2->len != 0,
3186 * accept any overlapping write. Any order is acceptable for
3187 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
3188 * we process all of them.
3189 */
3190 if (r2->len) {
3191 addr1 += r1->len;
3192 addr2 += r2->len;
3193 }
3194
3195 if (addr1 > addr2)
3196 return 1;
3197
3198 return 0;
3199}
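
/*
 * Worked example of the comparison above as used by the I/O paths
 * (r1 is the guest access, r2 a registered range): against a device
 * at { .addr = 0x100, .len = 4 }, an access { 0x102, 2 } lies inside
 * and compares equal (0); { 0xfe, 4 } starts below and sorts first
 * (-1); { 0x102, 4 } runs past the end and sorts after (1).  A range
 * registered with len == 0 compares equal to any access starting at
 * exactly its address.
 */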
3200
3201static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
3202{
3203 return kvm_io_bus_cmp(p1, p2);
3204}
3205
3206static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
3207 gpa_t addr, int len)
3208{
3209 bus->range[bus->dev_count++] = (struct kvm_io_range) {
3210 .addr = addr,
3211 .len = len,
3212 .dev = dev,
3213 };
3214
3215 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
3216 kvm_io_bus_sort_cmp, NULL);
3217
3218 return 0;
3219}
3220
3221static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
3222 gpa_t addr, int len)
3223{
3224 struct kvm_io_range *range, key;
3225 int off;
3226
3227 key = (struct kvm_io_range) {
3228 .addr = addr,
3229 .len = len,
3230 };
3231
3232 range = bsearch(&key, bus->range, bus->dev_count,
3233 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
3234 if (range == NULL)
3235 return -ENOENT;
3236
3237 off = range - bus->range;
3238
3239 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
3240 off--;
3241
3242 return off;
3243}
3244
3245static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3246 struct kvm_io_range *range, const void *val)
3247{
3248 int idx;
3249
3250 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3251 if (idx < 0)
3252 return -EOPNOTSUPP;
3253
3254 while (idx < bus->dev_count &&
3255 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3256 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
3257 range->len, val))
3258 return idx;
3259 idx++;
3260 }
3261
3262 return -EOPNOTSUPP;
3263}
3264
3265/* kvm_io_bus_write - called under kvm->slots_lock */
3266int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3267 int len, const void *val)
3268{
3269 struct kvm_io_bus *bus;
3270 struct kvm_io_range range;
3271 int r;
3272
3273 range = (struct kvm_io_range) {
3274 .addr = addr,
3275 .len = len,
3276 };
3277
3278 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3279 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3280 return r < 0 ? r : 0;
3281}
3282
3283/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
3284int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3285 gpa_t addr, int len, const void *val, long cookie)
3286{
3287 struct kvm_io_bus *bus;
3288 struct kvm_io_range range;
3289
3290 range = (struct kvm_io_range) {
3291 .addr = addr,
3292 .len = len,
3293 };
3294
3295 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3296
3297 /* First try the device referenced by cookie. */
3298 if ((cookie >= 0) && (cookie < bus->dev_count) &&
3299 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
3300 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
3301 val))
3302 return cookie;
3303
3304 /*
3305 * cookie contained garbage; fall back to search and return the
3306 * correct cookie value.
3307 */
3308 return __kvm_io_bus_write(vcpu, bus, &range, val);
3309}
3310
3311static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3312 struct kvm_io_range *range, void *val)
3313{
3314 int idx;
3315
3316 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3317 if (idx < 0)
3318 return -EOPNOTSUPP;
3319
3320 while (idx < bus->dev_count &&
3321 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3322 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
3323 range->len, val))
3324 return idx;
3325 idx++;
3326 }
3327
3328 return -EOPNOTSUPP;
3329}
3330EXPORT_SYMBOL_GPL(kvm_io_bus_write);
3331
3332/* kvm_io_bus_read - called under kvm->slots_lock */
3333int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3334 int len, void *val)
3335{
3336 struct kvm_io_bus *bus;
3337 struct kvm_io_range range;
3338 int r;
3339
3340 range = (struct kvm_io_range) {
3341 .addr = addr,
3342 .len = len,
3343 };
3344
3345 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3346 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3347 return r < 0 ? r : 0;
3348}
3349
3350
3351/* Caller must hold slots_lock. */
3352int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3353 int len, struct kvm_io_device *dev)
3354{
3355 struct kvm_io_bus *new_bus, *bus;
3356
3357 bus = kvm->buses[bus_idx];
3358 /* exclude ioeventfd which is limited by maximum fd */
3359 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3360 return -ENOSPC;
3361
3362 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
3363 sizeof(struct kvm_io_range)), GFP_KERNEL);
3364 if (!new_bus)
3365 return -ENOMEM;
3366 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
3367 sizeof(struct kvm_io_range)));
3368 kvm_io_bus_insert_dev(new_bus, dev, addr, len);
3369 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3370 synchronize_srcu_expedited(&kvm->srcu);
3371 kfree(bus);
3372
3373 return 0;
3374}
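
/*
 * Note the read-copy-update pattern above: lookups walk the bus under
 * SRCU, so registration builds a complete replacement array, publishes
 * it with rcu_assign_pointer(), waits out existing readers with
 * synchronize_srcu_expedited(), and only then frees the old bus.
 * kvm_io_bus_unregister_dev() below follows the same sequence.
 */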
3375
3376/* Caller must hold slots_lock. */
3377int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3378 struct kvm_io_device *dev)
3379{
3380 int i, r;
3381 struct kvm_io_bus *new_bus, *bus;
3382
3383 bus = kvm->buses[bus_idx];
3384 r = -ENOENT;
3385 for (i = 0; i < bus->dev_count; i++)
3386 if (bus->range[i].dev == dev) {
3387 r = 0;
3388 break;
3389 }
3390
3391 if (r)
3392 return r;
3393
3394 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
3395 sizeof(struct kvm_io_range)), GFP_KERNEL);
3396 if (!new_bus)
3397 return -ENOMEM;
3398
3399 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3400 new_bus->dev_count--;
3401 memcpy(new_bus->range + i, bus->range + i + 1,
3402 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3403
3404 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3405 synchronize_srcu_expedited(&kvm->srcu);
3406 kfree(bus);
3407 return r;
3408}
3409
3410static struct notifier_block kvm_cpu_notifier = {
3411 .notifier_call = kvm_cpu_hotplug,
3412};
3413
3414static int vm_stat_get(void *_offset, u64 *val)
3415{
3416 unsigned offset = (long)_offset;
3417 struct kvm *kvm;
3418
3419 *val = 0;
3420 spin_lock(&kvm_lock);
3421 list_for_each_entry(kvm, &vm_list, vm_list)
3422 *val += *(u32 *)((void *)kvm + offset);
3423 spin_unlock(&kvm_lock);
3424 return 0;
3425}
3426
3427DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
3428
3429static int vcpu_stat_get(void *_offset, u64 *val)
3430{
3431 unsigned offset = (long)_offset;
3432 struct kvm *kvm;
3433 struct kvm_vcpu *vcpu;
3434 int i;
3435
3436 *val = 0;
3437 spin_lock(&kvm_lock);
3438 list_for_each_entry(kvm, &vm_list, vm_list)
3439 kvm_for_each_vcpu(i, vcpu, kvm)
3440 *val += *(u32 *)((void *)vcpu + offset);
3441
3442 spin_unlock(&kvm_lock);
3443 return 0;
3444}
3445
3446DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
3447
3448static const struct file_operations *stat_fops[] = {
3449 [KVM_STAT_VCPU] = &vcpu_stat_fops,
3450 [KVM_STAT_VM] = &vm_stat_fops,
3451};
3452
3453static int kvm_init_debug(void)
3454{
3455 int r = -EEXIST;
3456 struct kvm_stats_debugfs_item *p;
3457
3458 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
3459 if (kvm_debugfs_dir == NULL)
3460 goto out;
3461
3462 for (p = debugfs_entries; p->name; ++p) {
3463 if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
3464 (void *)(long)p->offset,
3465 stat_fops[p->kind]))
3466 goto out_dir;
3467 }
3468
3469 return 0;
3470
3471out_dir:
3472 debugfs_remove_recursive(kvm_debugfs_dir);
3473out:
3474 return r;
3475}
3476
3477static int kvm_suspend(void)
3478{
3479 if (kvm_usage_count)
3480 hardware_disable_nolock(NULL);
3481 return 0;
3482}
3483
3484static void kvm_resume(void)
3485{
3486 if (kvm_usage_count) {
3487 WARN_ON(raw_spin_is_locked(&kvm_count_lock));
3488 hardware_enable_nolock(NULL);
3489 }
3490}
3491
3492static struct syscore_ops kvm_syscore_ops = {
3493 .suspend = kvm_suspend,
3494 .resume = kvm_resume,
3495};
3496
3497static inline
3498struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
3499{
3500 return container_of(pn, struct kvm_vcpu, preempt_notifier);
3501}
3502
3503static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
3504{
3505 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3506
3507 if (vcpu->preempted)
3508 vcpu->preempted = false;
3509
3510 kvm_arch_sched_in(vcpu, cpu);
3511
3512 kvm_arch_vcpu_load(vcpu, cpu);
3513}
3514
3515static void kvm_sched_out(struct preempt_notifier *pn,
3516 struct task_struct *next)
3517{
3518 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3519
3520 if (current->state == TASK_RUNNING)
3521 vcpu->preempted = true;
3522 kvm_arch_vcpu_put(vcpu);
3523}
3524
3525int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3526 struct module *module)
3527{
3528 int r;
3529 int cpu;
3530
3531 r = kvm_arch_init(opaque);
3532 if (r)
3533 goto out_fail;
3534
3535 /*
3536	 * kvm_arch_init() makes sure there's at most one caller
3537	 * for architectures that support multiple implementations,
3538	 * like Intel and AMD on x86.
3539	 * kvm_arch_init() must be called before kvm_irqfd_init() to avoid
3540	 * conflicts in case kvm is already set up for another implementation.
3541 */
3542 r = kvm_irqfd_init();
3543 if (r)
3544 goto out_irqfd;
3545
3546 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3547 r = -ENOMEM;
3548 goto out_free_0;
3549 }
3550
3551 r = kvm_arch_hardware_setup();
3552 if (r < 0)
3553 goto out_free_0a;
3554
3555 for_each_online_cpu(cpu) {
3556 smp_call_function_single(cpu,
3557 kvm_arch_check_processor_compat,
3558 &r, 1);
3559 if (r < 0)
3560 goto out_free_1;
3561 }
3562
3563 r = register_cpu_notifier(&kvm_cpu_notifier);
3564 if (r)
3565 goto out_free_2;
3566 register_reboot_notifier(&kvm_reboot_notifier);
3567
3568 /* A kmem cache lets us meet the alignment requirements of fx_save. */
3569 if (!vcpu_align)
3570 vcpu_align = __alignof__(struct kvm_vcpu);
3571 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
3572 0, NULL);
3573 if (!kvm_vcpu_cache) {
3574 r = -ENOMEM;
3575 goto out_free_3;
3576 }
3577
3578 r = kvm_async_pf_init();
3579 if (r)
3580 goto out_free;
3581
3582 kvm_chardev_ops.owner = module;
3583 kvm_vm_fops.owner = module;
3584 kvm_vcpu_fops.owner = module;
3585
3586 r = misc_register(&kvm_dev);
3587 if (r) {
3588 pr_err("kvm: misc device register failed\n");
3589 goto out_unreg;
3590 }
3591
3592 register_syscore_ops(&kvm_syscore_ops);
3593
3594 kvm_preempt_ops.sched_in = kvm_sched_in;
3595 kvm_preempt_ops.sched_out = kvm_sched_out;
3596
3597 r = kvm_init_debug();
3598 if (r) {
3599 pr_err("kvm: create debugfs files failed\n");
3600 goto out_undebugfs;
3601 }
3602
3603 r = kvm_vfio_ops_init();
3604 WARN_ON(r);
3605
3606 return 0;
3607
3608out_undebugfs:
3609 unregister_syscore_ops(&kvm_syscore_ops);
3610 misc_deregister(&kvm_dev);
3611out_unreg:
3612 kvm_async_pf_deinit();
3613out_free:
3614 kmem_cache_destroy(kvm_vcpu_cache);
3615out_free_3:
3616 unregister_reboot_notifier(&kvm_reboot_notifier);
3617 unregister_cpu_notifier(&kvm_cpu_notifier);
3618out_free_2:
3619out_free_1:
3620 kvm_arch_hardware_unsetup();
3621out_free_0a:
3622 free_cpumask_var(cpus_hardware_enabled);
3623out_free_0:
3624 kvm_irqfd_exit();
3625out_irqfd:
3626 kvm_arch_exit();
3627out_fail:
3628 return r;
3629}
3630EXPORT_SYMBOL_GPL(kvm_init);
3631
3632void kvm_exit(void)
3633{
3634 debugfs_remove_recursive(kvm_debugfs_dir);
3635 misc_deregister(&kvm_dev);
3636 kmem_cache_destroy(kvm_vcpu_cache);
3637 kvm_async_pf_deinit();
3638 unregister_syscore_ops(&kvm_syscore_ops);
3639 unregister_reboot_notifier(&kvm_reboot_notifier);
3640 unregister_cpu_notifier(&kvm_cpu_notifier);
3641 on_each_cpu(hardware_disable_nolock, NULL, 1);
3642 kvm_arch_hardware_unsetup();
3643 kvm_arch_exit();
3644 kvm_irqfd_exit();
3645 free_cpumask_var(cpus_hardware_enabled);
3646 kvm_vfio_ops_exit();
3647}
3648EXPORT_SYMBOL_GPL(kvm_exit);
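
/*
 * kvm_init()/kvm_exit() are invoked from the arch-specific module.  A
 * hedged sketch of what such an entry point looks like (names below
 * are illustrative; on x86, for example, the VMX module passes its own
 * opaque ops and the size/alignment of its vcpu structure):
 *
 *	static int __init my_arch_init(void)
 *	{
 *		return kvm_init(&my_arch_opaque, sizeof(struct my_vcpu),
 *				__alignof__(struct my_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit my_arch_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(my_arch_init);
 *	module_exit(my_arch_exit);
 */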
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 */
15
16#include <kvm/iodev.h>
17
18#include <linux/kvm_host.h>
19#include <linux/kvm.h>
20#include <linux/module.h>
21#include <linux/errno.h>
22#include <linux/percpu.h>
23#include <linux/mm.h>
24#include <linux/miscdevice.h>
25#include <linux/vmalloc.h>
26#include <linux/reboot.h>
27#include <linux/debugfs.h>
28#include <linux/highmem.h>
29#include <linux/file.h>
30#include <linux/syscore_ops.h>
31#include <linux/cpu.h>
32#include <linux/sched/signal.h>
33#include <linux/sched/mm.h>
34#include <linux/sched/stat.h>
35#include <linux/cpumask.h>
36#include <linux/smp.h>
37#include <linux/anon_inodes.h>
38#include <linux/profile.h>
39#include <linux/kvm_para.h>
40#include <linux/pagemap.h>
41#include <linux/mman.h>
42#include <linux/swap.h>
43#include <linux/bitops.h>
44#include <linux/spinlock.h>
45#include <linux/compat.h>
46#include <linux/srcu.h>
47#include <linux/hugetlb.h>
48#include <linux/slab.h>
49#include <linux/sort.h>
50#include <linux/bsearch.h>
51#include <linux/io.h>
52#include <linux/lockdep.h>
53#include <linux/kthread.h>
54#include <linux/suspend.h>
55
56#include <asm/processor.h>
57#include <asm/ioctl.h>
58#include <linux/uaccess.h>
59
60#include "coalesced_mmio.h"
61#include "async_pf.h"
62#include "mmu_lock.h"
63#include "vfio.h"
64
65#define CREATE_TRACE_POINTS
66#include <trace/events/kvm.h>
67
68#include <linux/kvm_dirty_ring.h>
69
70/* Worst case buffer size needed for holding an integer. */
71#define ITOA_MAX_LEN 12
72
73MODULE_AUTHOR("Qumranet");
74MODULE_LICENSE("GPL");
75
76/* Architectures should define their poll value according to the halt latency */
77unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
78module_param(halt_poll_ns, uint, 0644);
79EXPORT_SYMBOL_GPL(halt_poll_ns);
80
81/* Default doubles per-vcpu halt_poll_ns. */
82unsigned int halt_poll_ns_grow = 2;
83module_param(halt_poll_ns_grow, uint, 0644);
84EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
85
86/* The start value to grow halt_poll_ns from */
87unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
88module_param(halt_poll_ns_grow_start, uint, 0644);
89EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
90
91/* Default resets per-vcpu halt_poll_ns . */
92unsigned int halt_poll_ns_shrink;
93module_param(halt_poll_ns_shrink, uint, 0644);
94EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
95
96/*
97 * Ordering of locks:
98 *
99 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
100 */
101
102DEFINE_MUTEX(kvm_lock);
103static DEFINE_RAW_SPINLOCK(kvm_count_lock);
104LIST_HEAD(vm_list);
105
106static cpumask_var_t cpus_hardware_enabled;
107static int kvm_usage_count;
108static atomic_t hardware_enable_failed;
109
110static struct kmem_cache *kvm_vcpu_cache;
111
112static __read_mostly struct preempt_ops kvm_preempt_ops;
113static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
114
115struct dentry *kvm_debugfs_dir;
116EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
117
118static const struct file_operations stat_fops_per_vm;
119
120static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
121 unsigned long arg);
122#ifdef CONFIG_KVM_COMPAT
123static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
124 unsigned long arg);
125#define KVM_COMPAT(c) .compat_ioctl = (c)
126#else
127/*
128 * For architectures that don't implement a compat infrastructure,
129 * adopt a double line of defense:
130 * - Prevent a compat task from opening /dev/kvm
131 * - If the open has been done by a 64bit task, and the KVM fd
132 * passed to a compat task, let the ioctls fail.
133 */
134static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
135 unsigned long arg) { return -EINVAL; }
136
137static int kvm_no_compat_open(struct inode *inode, struct file *file)
138{
139 return is_compat_task() ? -ENODEV : 0;
140}
141#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
142 .open = kvm_no_compat_open
143#endif
144static int hardware_enable_all(void);
145static void hardware_disable_all(void);
146
147static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
148
149__visible bool kvm_rebooting;
150EXPORT_SYMBOL_GPL(kvm_rebooting);
151
152#define KVM_EVENT_CREATE_VM 0
153#define KVM_EVENT_DESTROY_VM 1
154static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
155static unsigned long long kvm_createvm_count;
156static unsigned long long kvm_active_vms;
157
158__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
159 unsigned long start, unsigned long end)
160{
161}
162
163bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
164{
165 /*
166 * The metadata used by is_zone_device_page() to determine whether or
167 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
168 * the device has been pinned, e.g. by get_user_pages(). WARN if the
169 * page_count() is zero to help detect bad usage of this helper.
170 */
171 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
172 return false;
173
174 return is_zone_device_page(pfn_to_page(pfn));
175}
176
177bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
178{
179 /*
180 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
181 * perspective they are "normal" pages, albeit with slightly different
182 * usage rules.
183 */
184 if (pfn_valid(pfn))
185 return PageReserved(pfn_to_page(pfn)) &&
186 !is_zero_pfn(pfn) &&
187 !kvm_is_zone_device_pfn(pfn);
188
189 return true;
190}
191
192bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
193{
194 struct page *page = pfn_to_page(pfn);
195
196 if (!PageTransCompoundMap(page))
197 return false;
198
199 return is_transparent_hugepage(compound_head(page));
200}
201
202/*
203 * Switches to specified vcpu, until a matching vcpu_put()
204 */
205void vcpu_load(struct kvm_vcpu *vcpu)
206{
207 int cpu = get_cpu();
208
209 __this_cpu_write(kvm_running_vcpu, vcpu);
210 preempt_notifier_register(&vcpu->preempt_notifier);
211 kvm_arch_vcpu_load(vcpu, cpu);
212 put_cpu();
213}
214EXPORT_SYMBOL_GPL(vcpu_load);
215
216void vcpu_put(struct kvm_vcpu *vcpu)
217{
218 preempt_disable();
219 kvm_arch_vcpu_put(vcpu);
220 preempt_notifier_unregister(&vcpu->preempt_notifier);
221 __this_cpu_write(kvm_running_vcpu, NULL);
222 preempt_enable();
223}
224EXPORT_SYMBOL_GPL(vcpu_put);
225
226/* TODO: merge with kvm_arch_vcpu_should_kick */
227static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
228{
229 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
230
231 /*
232 * We need to wait for the VCPU to reenable interrupts and get out of
233 * READING_SHADOW_PAGE_TABLES mode.
234 */
235 if (req & KVM_REQUEST_WAIT)
236 return mode != OUTSIDE_GUEST_MODE;
237
238 /*
239 * Need to kick a running VCPU, but otherwise there is nothing to do.
240 */
241 return mode == IN_GUEST_MODE;
242}
243
244static void ack_flush(void *_completed)
245{
246}
247
248static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
249{
250 if (unlikely(!cpus))
251 cpus = cpu_online_mask;
252
253 if (cpumask_empty(cpus))
254 return false;
255
256 smp_call_function_many(cpus, ack_flush, NULL, wait);
257 return true;
258}
259
260bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
261 struct kvm_vcpu *except,
262 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
263{
264 int i, cpu, me;
265 struct kvm_vcpu *vcpu;
266 bool called;
267
268 me = get_cpu();
269
270 kvm_for_each_vcpu(i, vcpu, kvm) {
271 if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
272 vcpu == except)
273 continue;
274
275 kvm_make_request(req, vcpu);
276 cpu = vcpu->cpu;
277
278 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
279 continue;
280
281 if (tmp != NULL && cpu != -1 && cpu != me &&
282 kvm_request_needs_ipi(vcpu, req))
283 __cpumask_set_cpu(cpu, tmp);
284 }
285
286 called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
287 put_cpu();
288
289 return called;
290}
291
292bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
293 struct kvm_vcpu *except)
294{
295 cpumask_var_t cpus;
296 bool called;
297
298 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
299
300 called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
301
302 free_cpumask_var(cpus);
303 return called;
304}
305
306bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
307{
308 return kvm_make_all_cpus_request_except(kvm, req, NULL);
309}
310EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
311
312#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
313void kvm_flush_remote_tlbs(struct kvm *kvm)
314{
315 /*
316 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
317 * kvm_make_all_cpus_request.
318 */
319 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
320
321 /*
322 * We want to publish modifications to the page tables before reading
323 * mode. Pairs with a memory barrier in arch-specific code.
324 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
325 * and smp_mb in walk_shadow_page_lockless_begin/end.
326 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
327 *
328 * There is already an smp_mb__after_atomic() before
329 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
330 * barrier here.
331 */
332 if (!kvm_arch_flush_remote_tlb(kvm)
333 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
334 ++kvm->stat.generic.remote_tlb_flush;
335 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
336}
337EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
338#endif
339
340void kvm_reload_remote_mmus(struct kvm *kvm)
341{
342 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
343}
344
345#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
346static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
347 gfp_t gfp_flags)
348{
349 gfp_flags |= mc->gfp_zero;
350
351 if (mc->kmem_cache)
352 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
353 else
354 return (void *)__get_free_page(gfp_flags);
355}
356
357int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
358{
359 void *obj;
360
361 if (mc->nobjs >= min)
362 return 0;
363 while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
364 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
365 if (!obj)
366 return mc->nobjs >= min ? 0 : -ENOMEM;
367 mc->objects[mc->nobjs++] = obj;
368 }
369 return 0;
370}
371
372int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
373{
374 return mc->nobjs;
375}
376
377void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
378{
379 while (mc->nobjs) {
380 if (mc->kmem_cache)
381 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
382 else
383 free_page((unsigned long)mc->objects[--mc->nobjs]);
384 }
385}
386
387void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
388{
389 void *p;
390
391 if (WARN_ON(!mc->nobjs))
392 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
393 else
394 p = mc->objects[--mc->nobjs];
395 BUG_ON(!p);
396 return p;
397}
398#endif
399
400static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
401{
402 mutex_init(&vcpu->mutex);
403 vcpu->cpu = -1;
404 vcpu->kvm = kvm;
405 vcpu->vcpu_id = id;
406 vcpu->pid = NULL;
407 rcuwait_init(&vcpu->wait);
408 kvm_async_pf_vcpu_init(vcpu);
409
410 vcpu->pre_pcpu = -1;
411 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
412
413 kvm_vcpu_set_in_spin_loop(vcpu, false);
414 kvm_vcpu_set_dy_eligible(vcpu, false);
415 vcpu->preempted = false;
416 vcpu->ready = false;
417 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
418}
419
420void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
421{
422 kvm_dirty_ring_free(&vcpu->dirty_ring);
423 kvm_arch_vcpu_destroy(vcpu);
424
425 /*
426 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
427 * the vcpu->pid pointer, and at destruction time all file descriptors
428 * are already gone.
429 */
430 put_pid(rcu_dereference_protected(vcpu->pid, 1));
431
432 free_page((unsigned long)vcpu->run);
433 kmem_cache_free(kvm_vcpu_cache, vcpu);
434}
435EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
436
437#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
438static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
439{
440 return container_of(mn, struct kvm, mmu_notifier);
441}
442
443static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
444 struct mm_struct *mm,
445 unsigned long start, unsigned long end)
446{
447 struct kvm *kvm = mmu_notifier_to_kvm(mn);
448 int idx;
449
450 idx = srcu_read_lock(&kvm->srcu);
451 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
452 srcu_read_unlock(&kvm->srcu, idx);
453}
454
455typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
456
457typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
458 unsigned long end);
459
460struct kvm_hva_range {
461 unsigned long start;
462 unsigned long end;
463 pte_t pte;
464 hva_handler_t handler;
465 on_lock_fn_t on_lock;
466 bool flush_on_ret;
467 bool may_block;
468};
469
470/*
471 * Use a dedicated stub instead of NULL to indicate that there is no callback
472 * function/handler. The compiler technically can't guarantee that a real
473 * function will have a non-zero address, and so it will generate code to
474 * check for !NULL, whereas comparing against a stub will be elided at compile
475 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
476 */
477static void kvm_null_fn(void)
478{
479
480}
481#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
482
483static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
484 const struct kvm_hva_range *range)
485{
486 bool ret = false, locked = false;
487 struct kvm_gfn_range gfn_range;
488 struct kvm_memory_slot *slot;
489 struct kvm_memslots *slots;
490 int i, idx;
491
492 /* A null handler is allowed if and only if on_lock() is provided. */
493 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
494 IS_KVM_NULL_FN(range->handler)))
495 return 0;
496
497 idx = srcu_read_lock(&kvm->srcu);
498
499 /* The on_lock() path does not yet support lock elision. */
500 if (!IS_KVM_NULL_FN(range->on_lock)) {
501 locked = true;
502 KVM_MMU_LOCK(kvm);
503
504 range->on_lock(kvm, range->start, range->end);
505
506 if (IS_KVM_NULL_FN(range->handler))
507 goto out_unlock;
508 }
509
510 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
511 slots = __kvm_memslots(kvm, i);
512 kvm_for_each_memslot(slot, slots) {
513 unsigned long hva_start, hva_end;
514
515 hva_start = max(range->start, slot->userspace_addr);
516 hva_end = min(range->end, slot->userspace_addr +
517 (slot->npages << PAGE_SHIFT));
518 if (hva_start >= hva_end)
519 continue;
520
521 /*
522 * To optimize for the likely case where the address
523 * range is covered by zero or one memslots, don't
524 * bother making these conditional (to avoid writes on
525 * the second or later invocation of the handler).
526 */
527 gfn_range.pte = range->pte;
528 gfn_range.may_block = range->may_block;
529
530 /*
531 * {gfn(page) | page intersects with [hva_start, hva_end)} =
532 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
533 */
534 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
535 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
536 gfn_range.slot = slot;
537
538 if (!locked) {
539 locked = true;
540 KVM_MMU_LOCK(kvm);
541 }
542 ret |= range->handler(kvm, &gfn_range);
543 }
544 }
545
546 if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
547 kvm_flush_remote_tlbs(kvm);
548
549out_unlock:
550 if (locked)
551 KVM_MMU_UNLOCK(kvm);
552
553 srcu_read_unlock(&kvm->srcu, idx);
554
555 /* The notifiers are averse to booleans. :-( */
556 return (int)ret;
557}
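/*
 * Editor's sketch (not part of the original source): the hva->gfn clamping
 * performed in the loop above, pulled out as a standalone helper. Given a
 * memslot and an hva interval, it computes the gfn interval that would be
 * handed to the handler; the name kvm_hva_range_to_gfn_range() is
 * hypothetical.
 */
static bool __maybe_unused
kvm_hva_range_to_gfn_range(struct kvm_memory_slot *slot,
			   unsigned long start, unsigned long end,
			   gfn_t *gfn_start, gfn_t *gfn_end)
{
	unsigned long hva_start, hva_end;

	/* Clamp the hva range to the part that overlaps this memslot. */
	hva_start = max(start, slot->userspace_addr);
	hva_end = min(end, slot->userspace_addr +
			   (slot->npages << PAGE_SHIFT));
	if (hva_start >= hva_end)
		return false;

	/*
	 * Round the exclusive end up by almost a page so that a partial
	 * final page still lands inside [gfn_start, gfn_end).
	 */
	*gfn_start = hva_to_gfn_memslot(hva_start, slot);
	*gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
	return true;
}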
558
559static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
560 unsigned long start,
561 unsigned long end,
562 pte_t pte,
563 hva_handler_t handler)
564{
565 struct kvm *kvm = mmu_notifier_to_kvm(mn);
566 const struct kvm_hva_range range = {
567 .start = start,
568 .end = end,
569 .pte = pte,
570 .handler = handler,
571 .on_lock = (void *)kvm_null_fn,
572 .flush_on_ret = true,
573 .may_block = false,
574 };
575
576 return __kvm_handle_hva_range(kvm, &range);
577}
578
579static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
580 unsigned long start,
581 unsigned long end,
582 hva_handler_t handler)
583{
584 struct kvm *kvm = mmu_notifier_to_kvm(mn);
585 const struct kvm_hva_range range = {
586 .start = start,
587 .end = end,
588 .pte = __pte(0),
589 .handler = handler,
590 .on_lock = (void *)kvm_null_fn,
591 .flush_on_ret = false,
592 .may_block = false,
593 };
594
595 return __kvm_handle_hva_range(kvm, &range);
596}
597static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
598 struct mm_struct *mm,
599 unsigned long address,
600 pte_t pte)
601{
602 struct kvm *kvm = mmu_notifier_to_kvm(mn);
603
604 trace_kvm_set_spte_hva(address);
605
606 /*
607 * .change_pte() must be surrounded by .invalidate_range_{start,end}(),
608 * and so always runs with an elevated notifier count. This obviates
609 * the need to bump the sequence count.
610 */
611 WARN_ON_ONCE(!kvm->mmu_notifier_count);
612
613 kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
614}
615
616static void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
617 unsigned long end)
618{
619 /*
620 * The count increase must become visible at unlock time as no
621 * spte can be established without taking the mmu_lock and
622 * count is also read inside the mmu_lock critical section.
623 */
624 kvm->mmu_notifier_count++;
625 if (likely(kvm->mmu_notifier_count == 1)) {
626 kvm->mmu_notifier_range_start = start;
627 kvm->mmu_notifier_range_end = end;
628 } else {
629 /*
630 * Fully tracking multiple concurrent ranges has diminishing
631 * returns. Keep things simple and just find the minimal range
632 * which includes the current and new ranges. As there won't be
633 * enough information to subtract a range after its invalidate
634 * completes, any ranges invalidated concurrently will
635 * accumulate and persist until all outstanding invalidates
636 * complete.
637 */
638 kvm->mmu_notifier_range_start =
639 min(kvm->mmu_notifier_range_start, start);
640 kvm->mmu_notifier_range_end =
641 max(kvm->mmu_notifier_range_end, end);
642 }
643}
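/*
 * Worked example for the coalescing above (editor's addition): if one
 * invalidation of [0x1000, 0x3000) is in flight and a second thread starts
 * invalidating [0x8000, 0x9000), the tracked range becomes the covering
 * interval [0x1000, 0x9000) and stays that wide until mmu_notifier_count
 * drops back to zero.
 */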
644
645static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
646 const struct mmu_notifier_range *range)
647{
648 struct kvm *kvm = mmu_notifier_to_kvm(mn);
649 const struct kvm_hva_range hva_range = {
650 .start = range->start,
651 .end = range->end,
652 .pte = __pte(0),
653 .handler = kvm_unmap_gfn_range,
654 .on_lock = kvm_inc_notifier_count,
655 .flush_on_ret = true,
656 .may_block = mmu_notifier_range_blockable(range),
657 };
658
659 trace_kvm_unmap_hva_range(range->start, range->end);
660
661 __kvm_handle_hva_range(kvm, &hva_range);
662
663 return 0;
664}
665
666static void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
667 unsigned long end)
668{
669 /*
670 * This sequence increase will notify the kvm page fault that
671 * the page that is going to be mapped in the spte could have
672 * been freed.
673 */
674 kvm->mmu_notifier_seq++;
675 smp_wmb();
676 /*
677 * The above sequence increase must be visible before the
678 * below count decrease, which is ensured by the smp_wmb above
679 * in conjunction with the smp_rmb in mmu_notifier_retry().
680 */
681 kvm->mmu_notifier_count--;
682}
683
684static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
685 const struct mmu_notifier_range *range)
686{
687 struct kvm *kvm = mmu_notifier_to_kvm(mn);
688 const struct kvm_hva_range hva_range = {
689 .start = range->start,
690 .end = range->end,
691 .pte = __pte(0),
692 .handler = (void *)kvm_null_fn,
693 .on_lock = kvm_dec_notifier_count,
694 .flush_on_ret = false,
695 .may_block = mmu_notifier_range_blockable(range),
696 };
697
698 __kvm_handle_hva_range(kvm, &hva_range);
699
700 BUG_ON(kvm->mmu_notifier_count < 0);
701}
702
703static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
704 struct mm_struct *mm,
705 unsigned long start,
706 unsigned long end)
707{
708 trace_kvm_age_hva(start, end);
709
710 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
711}
712
713static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
714 struct mm_struct *mm,
715 unsigned long start,
716 unsigned long end)
717{
718 trace_kvm_age_hva(start, end);
719
720 /*
721 * Even though we do not flush TLB, this will still adversely
722 * affect performance on pre-Haswell Intel EPT, where there is
723 * no EPT Access Bit to clear, so we have to tear down EPT
724 * tables instead. If we find this unacceptable, we can always
725 * add a parameter to kvm_age_hva so that it effectively doesn't
726 * do anything on clear_young.
727 *
728 * Also note that currently we never issue secondary TLB flushes
729 * from clear_young, leaving this job up to the regular system
730 * cadence. If we find this inaccurate, we might come up with a
731 * more sophisticated heuristic later.
732 */
733 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
734}
735
736static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
737 struct mm_struct *mm,
738 unsigned long address)
739{
740 trace_kvm_test_age_hva(address);
741
742 return kvm_handle_hva_range_no_flush(mn, address, address + 1,
743 kvm_test_age_gfn);
744}
745
746static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
747 struct mm_struct *mm)
748{
749 struct kvm *kvm = mmu_notifier_to_kvm(mn);
750 int idx;
751
752 idx = srcu_read_lock(&kvm->srcu);
753 kvm_arch_flush_shadow_all(kvm);
754 srcu_read_unlock(&kvm->srcu, idx);
755}
756
757static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
758 .invalidate_range = kvm_mmu_notifier_invalidate_range,
759 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
760 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
761 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
762 .clear_young = kvm_mmu_notifier_clear_young,
763 .test_young = kvm_mmu_notifier_test_young,
764 .change_pte = kvm_mmu_notifier_change_pte,
765 .release = kvm_mmu_notifier_release,
766};
767
768static int kvm_init_mmu_notifier(struct kvm *kvm)
769{
770 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
771 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
772}
773
774#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
775
776static int kvm_init_mmu_notifier(struct kvm *kvm)
777{
778 return 0;
779}
780
781#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
782
783#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
784static int kvm_pm_notifier_call(struct notifier_block *bl,
785 unsigned long state,
786 void *unused)
787{
788 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
789
790 return kvm_arch_pm_notifier(kvm, state);
791}
792
793static void kvm_init_pm_notifier(struct kvm *kvm)
794{
795 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
796 /* Suspend KVM before we suspend ftrace, RCU, etc. */
797 kvm->pm_notifier.priority = INT_MAX;
798 register_pm_notifier(&kvm->pm_notifier);
799}
800
801static void kvm_destroy_pm_notifier(struct kvm *kvm)
802{
803 unregister_pm_notifier(&kvm->pm_notifier);
804}
805#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
806static void kvm_init_pm_notifier(struct kvm *kvm)
807{
808}
809
810static void kvm_destroy_pm_notifier(struct kvm *kvm)
811{
812}
813#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
814
815static struct kvm_memslots *kvm_alloc_memslots(void)
816{
817 int i;
818 struct kvm_memslots *slots;
819
820 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
821 if (!slots)
822 return NULL;
823
824 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
825 slots->id_to_index[i] = -1;
826
827 return slots;
828}
829
830static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
831{
832 if (!memslot->dirty_bitmap)
833 return;
834
835 kvfree(memslot->dirty_bitmap);
836 memslot->dirty_bitmap = NULL;
837}
838
839static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
840{
841 kvm_destroy_dirty_bitmap(slot);
842
843 kvm_arch_free_memslot(kvm, slot);
844
845 slot->flags = 0;
846 slot->npages = 0;
847}
848
849static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
850{
851 struct kvm_memory_slot *memslot;
852
853 if (!slots)
854 return;
855
856 kvm_for_each_memslot(memslot, slots)
857 kvm_free_memslot(kvm, memslot);
858
859 kvfree(slots);
860}
861
862static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
863{
864 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
865 case KVM_STATS_TYPE_INSTANT:
866 return 0444;
867 case KVM_STATS_TYPE_CUMULATIVE:
868 case KVM_STATS_TYPE_PEAK:
869 default:
870 return 0644;
871 }
872}
873
874
875static void kvm_destroy_vm_debugfs(struct kvm *kvm)
876{
877 int i;
878 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
879 kvm_vcpu_stats_header.num_desc;
880
881 if (!kvm->debugfs_dentry)
882 return;
883
884 debugfs_remove_recursive(kvm->debugfs_dentry);
885
886 if (kvm->debugfs_stat_data) {
887 for (i = 0; i < kvm_debugfs_num_entries; i++)
888 kfree(kvm->debugfs_stat_data[i]);
889 kfree(kvm->debugfs_stat_data);
890 }
891}
892
893static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
894{
895 static DEFINE_MUTEX(kvm_debugfs_lock);
896 struct dentry *dent;
897 char dir_name[ITOA_MAX_LEN * 2];
898 struct kvm_stat_data *stat_data;
899 const struct _kvm_stats_desc *pdesc;
900 int i;
901 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
902 kvm_vcpu_stats_header.num_desc;
903
904 if (!debugfs_initialized())
905 return 0;
906
907 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
908 mutex_lock(&kvm_debugfs_lock);
909 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
910 if (dent) {
911 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
912 dput(dent);
913 mutex_unlock(&kvm_debugfs_lock);
914 return 0;
915 }
916 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
917 mutex_unlock(&kvm_debugfs_lock);
918 if (IS_ERR(dent))
919 return 0;
920
921 kvm->debugfs_dentry = dent;
922 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
923 sizeof(*kvm->debugfs_stat_data),
924 GFP_KERNEL_ACCOUNT);
925 if (!kvm->debugfs_stat_data)
926 return -ENOMEM;
927
928 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
929 pdesc = &kvm_vm_stats_desc[i];
930 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
931 if (!stat_data)
932 return -ENOMEM;
933
934 stat_data->kvm = kvm;
935 stat_data->desc = pdesc;
936 stat_data->kind = KVM_STAT_VM;
937 kvm->debugfs_stat_data[i] = stat_data;
938 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
939 kvm->debugfs_dentry, stat_data,
940 &stat_fops_per_vm);
941 }
942
943 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
944 pdesc = &kvm_vcpu_stats_desc[i];
945 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
946 if (!stat_data)
947 return -ENOMEM;
948
949 stat_data->kvm = kvm;
950 stat_data->desc = pdesc;
951 stat_data->kind = KVM_STAT_VCPU;
952 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
953 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
954 kvm->debugfs_dentry, stat_data,
955 &stat_fops_per_vm);
956 }
957 return 0;
958}
959
960/*
961 * Called after the VM is otherwise initialized, but just before adding it to
962 * the vm_list.
963 */
964int __weak kvm_arch_post_init_vm(struct kvm *kvm)
965{
966 return 0;
967}
968
969/*
970 * Called just after removing the VM from the vm_list, but before doing any
971 * other destruction.
972 */
973void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
974{
975}
976
977static struct kvm *kvm_create_vm(unsigned long type)
978{
979 struct kvm *kvm = kvm_arch_alloc_vm();
980 int r = -ENOMEM;
981 int i;
982
983 if (!kvm)
984 return ERR_PTR(-ENOMEM);
985
986 KVM_MMU_LOCK_INIT(kvm);
987 mmgrab(current->mm);
988 kvm->mm = current->mm;
989 kvm_eventfd_init(kvm);
990 mutex_init(&kvm->lock);
991 mutex_init(&kvm->irq_lock);
992 mutex_init(&kvm->slots_lock);
993 mutex_init(&kvm->slots_arch_lock);
994 INIT_LIST_HEAD(&kvm->devices);
995
996 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
997
998 if (init_srcu_struct(&kvm->srcu))
999 goto out_err_no_srcu;
1000 if (init_srcu_struct(&kvm->irq_srcu))
1001 goto out_err_no_irq_srcu;
1002
1003 refcount_set(&kvm->users_count, 1);
1004 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1005 struct kvm_memslots *slots = kvm_alloc_memslots();
1006
1007 if (!slots)
1008 goto out_err_no_arch_destroy_vm;
1009 /* Generations must be different for each address space. */
1010 slots->generation = i;
1011 rcu_assign_pointer(kvm->memslots[i], slots);
1012 }
1013
1014 for (i = 0; i < KVM_NR_BUSES; i++) {
1015 rcu_assign_pointer(kvm->buses[i],
1016 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1017 if (!kvm->buses[i])
1018 goto out_err_no_arch_destroy_vm;
1019 }
1020
1021 kvm->max_halt_poll_ns = halt_poll_ns;
1022
1023 r = kvm_arch_init_vm(kvm, type);
1024 if (r)
1025 goto out_err_no_arch_destroy_vm;
1026
1027 r = hardware_enable_all();
1028 if (r)
1029 goto out_err_no_disable;
1030
1031#ifdef CONFIG_HAVE_KVM_IRQFD
1032 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1033#endif
1034
1035 r = kvm_init_mmu_notifier(kvm);
1036 if (r)
1037 goto out_err_no_mmu_notifier;
1038
1039 r = kvm_arch_post_init_vm(kvm);
1040 if (r)
1041 goto out_err;
1042
1043 mutex_lock(&kvm_lock);
1044 list_add(&kvm->vm_list, &vm_list);
1045 mutex_unlock(&kvm_lock);
1046
1047 preempt_notifier_inc();
1048 kvm_init_pm_notifier(kvm);
1049
1050 return kvm;
1051
1052out_err:
1053#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1054 if (kvm->mmu_notifier.ops)
1055 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1056#endif
1057out_err_no_mmu_notifier:
1058 hardware_disable_all();
1059out_err_no_disable:
1060 kvm_arch_destroy_vm(kvm);
1061out_err_no_arch_destroy_vm:
1062 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1063 for (i = 0; i < KVM_NR_BUSES; i++)
1064 kfree(kvm_get_bus(kvm, i));
1065 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
1066 kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
1067 cleanup_srcu_struct(&kvm->irq_srcu);
1068out_err_no_irq_srcu:
1069 cleanup_srcu_struct(&kvm->srcu);
1070out_err_no_srcu:
1071 kvm_arch_free_vm(kvm);
1072 mmdrop(current->mm);
1073 return ERR_PTR(r);
1074}
1075
1076static void kvm_destroy_devices(struct kvm *kvm)
1077{
1078 struct kvm_device *dev, *tmp;
1079
1080 /*
1081 * We do not need to take the kvm->lock here, because nobody else
1082 * has a reference to the struct kvm at this point and therefore
1083 * cannot access the devices list anyhow.
1084 */
1085 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1086 list_del(&dev->vm_node);
1087 dev->ops->destroy(dev);
1088 }
1089}
1090
1091static void kvm_destroy_vm(struct kvm *kvm)
1092{
1093 int i;
1094 struct mm_struct *mm = kvm->mm;
1095
1096 kvm_destroy_pm_notifier(kvm);
1097 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1098 kvm_destroy_vm_debugfs(kvm);
1099 kvm_arch_sync_events(kvm);
1100 mutex_lock(&kvm_lock);
1101 list_del(&kvm->vm_list);
1102 mutex_unlock(&kvm_lock);
1103 kvm_arch_pre_destroy_vm(kvm);
1104
1105 kvm_free_irq_routing(kvm);
1106 for (i = 0; i < KVM_NR_BUSES; i++) {
1107 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1108
1109 if (bus)
1110 kvm_io_bus_destroy(bus);
1111 kvm->buses[i] = NULL;
1112 }
1113 kvm_coalesced_mmio_free(kvm);
1114#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1115 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1116#else
1117 kvm_arch_flush_shadow_all(kvm);
1118#endif
1119 kvm_arch_destroy_vm(kvm);
1120 kvm_destroy_devices(kvm);
1121 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
1122 kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
1123 cleanup_srcu_struct(&kvm->irq_srcu);
1124 cleanup_srcu_struct(&kvm->srcu);
1125 kvm_arch_free_vm(kvm);
1126 preempt_notifier_dec();
1127 hardware_disable_all();
1128 mmdrop(mm);
1129}
1130
1131void kvm_get_kvm(struct kvm *kvm)
1132{
1133 refcount_inc(&kvm->users_count);
1134}
1135EXPORT_SYMBOL_GPL(kvm_get_kvm);
1136
1137void kvm_put_kvm(struct kvm *kvm)
1138{
1139 if (refcount_dec_and_test(&kvm->users_count))
1140 kvm_destroy_vm(kvm);
1141}
1142EXPORT_SYMBOL_GPL(kvm_put_kvm);
1143
1144/*
1145 * Used to put a reference that was taken on behalf of an object associated
1146 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1147 * of the new file descriptor fails and the reference cannot be transferred to
1148 * its final owner. In such cases, the caller is still actively using @kvm and
1149 * will fail miserably if the refcount unexpectedly hits zero.
1150 */
1151void kvm_put_kvm_no_destroy(struct kvm *kvm)
1152{
1153 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1154}
1155EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1156
1157static int kvm_vm_release(struct inode *inode, struct file *filp)
1158{
1159 struct kvm *kvm = filp->private_data;
1160
1161 kvm_irqfd_release(kvm);
1162
1163 kvm_put_kvm(kvm);
1164 return 0;
1165}
1166
1167/*
1168 * Allocation size is twice as large as the actual dirty bitmap size.
1169 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
1170 */
1171static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1172{
1173 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
1174
1175 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
1176 if (!memslot->dirty_bitmap)
1177 return -ENOMEM;
1178
1179 return 0;
1180}
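/*
 * Sizing example (editor's addition, assuming 4 KiB pages): for a 1 GiB
 * memslot, npages == 262144, so kvm_dirty_bitmap_bytes() ==
 * ALIGN(262144, BITS_PER_LONG) / 8 == 32 KiB, and the allocation above is
 * 64 KiB. The second half is returned by kvm_second_dirty_bitmap() and
 * used as the snapshot buffer in the dirty-log paths below.
 */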
1181
1182/*
1183 * Delete a memslot by decrementing the number of used slots and shifting all
1184 * other entries in the array forward one spot.
1185 */
1186static inline void kvm_memslot_delete(struct kvm_memslots *slots,
1187 struct kvm_memory_slot *memslot)
1188{
1189 struct kvm_memory_slot *mslots = slots->memslots;
1190 int i;
1191
1192 if (WARN_ON(slots->id_to_index[memslot->id] == -1))
1193 return;
1194
1195 slots->used_slots--;
1196
1197 if (atomic_read(&slots->lru_slot) >= slots->used_slots)
1198 atomic_set(&slots->lru_slot, 0);
1199
1200 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
1201 mslots[i] = mslots[i + 1];
1202 slots->id_to_index[mslots[i].id] = i;
1203 }
1204 mslots[i] = *memslot;
1205 slots->id_to_index[memslot->id] = -1;
1206}
1207
1208/*
1209 * "Insert" a new memslot by incrementing the number of used slots. Returns
1210 * the new slot's initial index into the memslots array.
1211 */
1212static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
1213{
1214 return slots->used_slots++;
1215}
1216
1217/*
1218 * Move a changed memslot backwards in the array by shifting existing slots
1219 * with a higher GFN toward the front of the array. Note, the changed memslot
1220 * itself is not preserved in the array, i.e. not swapped at this time, only
1221 * its new index into the array is tracked. Returns the changed memslot's
1222 * current index into the memslots array.
1223 */
1224static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
1225 struct kvm_memory_slot *memslot)
1226{
1227 struct kvm_memory_slot *mslots = slots->memslots;
1228 int i;
1229
1230 if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
1231 WARN_ON_ONCE(!slots->used_slots))
1232 return -1;
1233
1234 /*
1235 * Move the target memslot backward in the array by shifting existing
1236 * memslots with a higher GFN (than the target memslot) towards the
1237 * front of the array.
1238 */
1239 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
1240 if (memslot->base_gfn > mslots[i + 1].base_gfn)
1241 break;
1242
1243 WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
1244
1245 /* Shift the next memslot forward one and update its index. */
1246 mslots[i] = mslots[i + 1];
1247 slots->id_to_index[mslots[i].id] = i;
1248 }
1249 return i;
1250}
1251
1252/*
1253 * Move a changed memslot forwards in the array by shifting existing slots with
1254 * a lower GFN toward the back of the array. Note, the changed memslot itself
1255 * is not preserved in the array, i.e. not swapped at this time, only its new
1256 * index into the array is tracked. Returns the changed memslot's final index
1257 * into the memslots array.
1258 */
1259static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
1260 struct kvm_memory_slot *memslot,
1261 int start)
1262{
1263 struct kvm_memory_slot *mslots = slots->memslots;
1264 int i;
1265
1266 for (i = start; i > 0; i--) {
1267 if (memslot->base_gfn < mslots[i - 1].base_gfn)
1268 break;
1269
1270 WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
1271
1272 /* Shift the next memslot back one and update its index. */
1273 mslots[i] = mslots[i - 1];
1274 slots->id_to_index[mslots[i].id] = i;
1275 }
1276 return i;
1277}
1278
1279/*
1280 * Re-sort memslots based on their GFN to account for an added, deleted, or
1281 * moved memslot. Sorting memslots by GFN allows using a binary search during
1282 * memslot lookup.
1283 *
1284 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry
1285 * at memslots[0] has the highest GFN.
1286 *
1287 * The sorting algorithm takes advantage of having initially sorted memslots
1288 * and knowing the position of the changed memslot. Sorting is also optimized
1289 * by not swapping the updated memslot and instead only shifting other memslots
1290 * and tracking the new index for the updated memslot. Only once its final
1291 * index is known is the updated memslot copied into its position in the array.
1292 *
1293 * - When deleting a memslot, the deleted memslot simply needs to be moved to
1294 * the end of the array.
1295 *
1296 * - When creating a memslot, the algorithm "inserts" the new memslot at the
1297 * end of the array and then moves it forward to its correct location.
1298 *
1299 * - When moving a memslot, the algorithm first moves the updated memslot
1300 * backward to handle the scenario where the memslot's GFN was changed to a
1301 * lower value. update_memslots() then falls through and runs the same flow
1302 * as creating a memslot to move the memslot forward to handle the scenario
1303 * where its GFN was changed to a higher value.
1304 *
1305 * Note, slots are sorted from highest->lowest instead of lowest->highest for
1306 * historical reasons. Originally, invalid memslots were denoted by having
1307 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
1308 * to the end of the array. The current algorithm uses dedicated logic to
1309 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
1310 *
1311 * The other historical motivation for highest->lowest was to improve the
1312 * performance of memslot lookup. KVM originally used a linear search starting
1313 * at memslots[0]. On x86, the largest memslot usually has one of the highest,
1314 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
1315 * single memslot above the 4gb boundary. As the largest memslot is also the
1316 * most likely to be referenced, sorting it to the front of the array was
1317 * advantageous. The current binary search starts from the middle of the array
1318 * and uses an LRU pointer to improve performance for all memslots and GFNs.
1319 */
1320static void update_memslots(struct kvm_memslots *slots,
1321 struct kvm_memory_slot *memslot,
1322 enum kvm_mr_change change)
1323{
1324 int i;
1325
1326 if (change == KVM_MR_DELETE) {
1327 kvm_memslot_delete(slots, memslot);
1328 } else {
1329 if (change == KVM_MR_CREATE)
1330 i = kvm_memslot_insert_back(slots);
1331 else
1332 i = kvm_memslot_move_backward(slots, memslot);
1333 i = kvm_memslot_move_forward(slots, memslot, i);
1334
1335 /*
1336 * Copy the memslot to its new position in memslots and update
1337 * its index accordingly.
1338 */
1339 slots->memslots[i] = *memslot;
1340 slots->id_to_index[memslot->id] = i;
1341 }
1342}
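/*
 * Worked example of the resulting order (editor's addition): a guest with
 * memslots at base_gfn 0x0, 0xc0 and 0x100000 (the bulk of RAM above the
 * 4gb boundary; 4 GiB >> PAGE_SHIFT == 0x100000) ends up sorted as
 * memslots[0].base_gfn == 0x100000, [1] == 0xc0, [2] == 0x0.
 */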
1343
1344static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1345{
1346 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1347
1348#ifdef __KVM_HAVE_READONLY_MEM
1349 valid_flags |= KVM_MEM_READONLY;
1350#endif
1351
1352 if (mem->flags & ~valid_flags)
1353 return -EINVAL;
1354
1355 return 0;
1356}
1357
1358static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
1359 int as_id, struct kvm_memslots *slots)
1360{
1361 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
1362 u64 gen = old_memslots->generation;
1363
1364 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1365 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1366
1367 rcu_assign_pointer(kvm->memslots[as_id], slots);
1368
1369 /*
1370 * Acquired in kvm_set_memslot. Must be released before the
1371 * synchronize_srcu_expedited() below to avoid deadlock with another
1372 * thread acquiring the slots_arch_lock in an srcu critical section.
1373 */
1374 mutex_unlock(&kvm->slots_arch_lock);
1375
1376 synchronize_srcu_expedited(&kvm->srcu);
1377
1378 /*
1379 * Increment the new memslot generation a second time, dropping the
1380 * update in-progress flag and incrementing the generation based on
1381 * the number of address spaces. This provides a unique and easily
1382 * identifiable generation number while the memslots are in flux.
1383 */
1384 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1385
1386 /*
1387 * Generations must be unique even across address spaces. We do not need
1388 * a global counter for that; instead, the generation space is evenly split
1389 * across address spaces. For example, with two address spaces, address
1390 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1391 * use generations 1, 3, 5, ...
1392 */
1393 gen += KVM_ADDRESS_SPACE_NUM;
1394
1395 kvm_arch_memslots_updated(kvm, gen);
1396
1397 slots->generation = gen;
1398
1399 return old_memslots;
1400}
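/*
 * Editor's sketch of the generation arithmetic above (hypothetical helper,
 * unused): given a memslot generation, compute the value it will hold after
 * one full update cycle. With KVM_ADDRESS_SPACE_NUM == 2, address space 0
 * steps 0 -> 2 -> 4 -> ... and address space 1 steps 1 -> 3 -> 5 -> ...,
 * with the in-progress bit set only transiently in between.
 */
static u64 __maybe_unused kvm_next_memslot_generation(u64 gen)
{
	/* The update-in-progress flag is never part of the final value. */
	gen &= ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
	return gen + KVM_ADDRESS_SPACE_NUM;
}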
1401
1402static size_t kvm_memslots_size(int slots)
1403{
1404 return sizeof(struct kvm_memslots) +
1405 (sizeof(struct kvm_memory_slot) * slots);
1406}
1407
1408static void kvm_copy_memslots(struct kvm_memslots *to,
1409 struct kvm_memslots *from)
1410{
1411 memcpy(to, from, kvm_memslots_size(from->used_slots));
1412}
1413
1414/*
1415 * Note, at a minimum, the current number of used slots must be allocated, even
1416 * when deleting a memslot, as we need a complete duplicate of the memslots for
1417 * use when invalidating a memslot prior to deleting/moving the memslot.
1418 */
1419static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
1420 enum kvm_mr_change change)
1421{
1422 struct kvm_memslots *slots;
1423 size_t new_size;
1424
1425 if (change == KVM_MR_CREATE)
1426 new_size = kvm_memslots_size(old->used_slots + 1);
1427 else
1428 new_size = kvm_memslots_size(old->used_slots);
1429
1430 slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
1431 if (likely(slots))
1432 kvm_copy_memslots(slots, old);
1433
1434 return slots;
1435}
1436
1437static int kvm_set_memslot(struct kvm *kvm,
1438 const struct kvm_userspace_memory_region *mem,
1439 struct kvm_memory_slot *old,
1440 struct kvm_memory_slot *new, int as_id,
1441 enum kvm_mr_change change)
1442{
1443 struct kvm_memory_slot *slot;
1444 struct kvm_memslots *slots;
1445 int r;
1446
1447 /*
1448 * Released in install_new_memslots.
1449 *
1450 * Must be held from before the current memslots are copied until
1451 * after the new memslots are installed with rcu_assign_pointer,
1452 * then released before the synchronize srcu in install_new_memslots.
1453 *
1454 * When modifying memslots outside of the slots_lock, must be held
1455 * before reading the pointer to the current memslots until after all
1456 * changes to those memslots are complete.
1457 *
1458 * These rules ensure that installing new memslots does not lose
1459 * changes made to the previous memslots.
1460 */
1461 mutex_lock(&kvm->slots_arch_lock);
1462
1463 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
1464 if (!slots) {
1465 mutex_unlock(&kvm->slots_arch_lock);
1466 return -ENOMEM;
1467 }
1468
1469 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1470 /*
1471 * Note, the INVALID flag needs to be in the appropriate entry
1472 * in the freshly allocated memslots, not in @old or @new.
1473 */
1474 slot = id_to_memslot(slots, old->id);
1475 slot->flags |= KVM_MEMSLOT_INVALID;
1476
1477 /*
1478 * We can re-use the memory from the old memslots.
1479 * It will be overwritten with a copy of the new memslots
1480 * after reacquiring the slots_arch_lock below.
1481 */
1482 slots = install_new_memslots(kvm, as_id, slots);
1483
1484 /* From this point no new shadow pages pointing to a deleted,
1485 * or moved, memslot will be created.
1486 *
1487 * validation of sp->gfn happens in:
1488 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1489 * - kvm_is_visible_gfn (mmu_check_root)
1490 */
1491 kvm_arch_flush_shadow_memslot(kvm, slot);
1492
1493 /* Released in install_new_memslots. */
1494 mutex_lock(&kvm->slots_arch_lock);
1495
1496 /*
1497 * The arch-specific fields of the memslots could have changed
1498 * between releasing the slots_arch_lock in
1499 * install_new_memslots and here, so get a fresh copy of the
1500 * slots.
1501 */
1502 kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
1503 }
1504
1505 r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
1506 if (r)
1507 goto out_slots;
1508
1509 update_memslots(slots, new, change);
1510 slots = install_new_memslots(kvm, as_id, slots);
1511
1512 kvm_arch_commit_memory_region(kvm, mem, old, new, change);
1513
1514 kvfree(slots);
1515 return 0;
1516
1517out_slots:
1518 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1519 slot = id_to_memslot(slots, old->id);
1520 slot->flags &= ~KVM_MEMSLOT_INVALID;
1521 slots = install_new_memslots(kvm, as_id, slots);
1522 } else {
1523 mutex_unlock(&kvm->slots_arch_lock);
1524 }
1525 kvfree(slots);
1526 return r;
1527}
1528
1529static int kvm_delete_memslot(struct kvm *kvm,
1530 const struct kvm_userspace_memory_region *mem,
1531 struct kvm_memory_slot *old, int as_id)
1532{
1533 struct kvm_memory_slot new;
1534 int r;
1535
1536 if (!old->npages)
1537 return -EINVAL;
1538
1539 memset(&new, 0, sizeof(new));
1540 new.id = old->id;
1541 /*
1542 * This is only for debugging purposes; it should never be referenced
1543 * for a removed memslot.
1544 */
1545 new.as_id = as_id;
1546
1547 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
1548 if (r)
1549 return r;
1550
1551 kvm_free_memslot(kvm, old);
1552 return 0;
1553}
1554
1555/*
1556 * Allocate some memory and give it an address in the guest physical address
1557 * space.
1558 *
1559 * Discontiguous memory is allowed, mostly for framebuffers.
1560 *
1561 * Must be called holding kvm->slots_lock for write.
1562 */
1563int __kvm_set_memory_region(struct kvm *kvm,
1564 const struct kvm_userspace_memory_region *mem)
1565{
1566 struct kvm_memory_slot old, new;
1567 struct kvm_memory_slot *tmp;
1568 enum kvm_mr_change change;
1569 int as_id, id;
1570 int r;
1571
1572 r = check_memory_region_flags(mem);
1573 if (r)
1574 return r;
1575
1576 as_id = mem->slot >> 16;
1577 id = (u16)mem->slot;
1578
1579 /* General sanity checks */
1580 if (mem->memory_size & (PAGE_SIZE - 1))
1581 return -EINVAL;
1582 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1583 return -EINVAL;
1584 /* We can read the guest memory with __xxx_user() later on. */
1585 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1586 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1587 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1588 mem->memory_size))
1589 return -EINVAL;
1590 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1591 return -EINVAL;
1592 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1593 return -EINVAL;
1594
1595 * Make a full copy of the old memslot; the pointer will become stale
1596 * Make a full copy of the old memslot, the pointer will become stale
1597 * when the memslots are re-sorted by update_memslots(), and the old
1598 * memslot needs to be referenced after calling update_memslots(), e.g.
1599 * to free its resources and for arch specific behavior.
1600 */
1601 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
1602 if (tmp) {
1603 old = *tmp;
1604 tmp = NULL;
1605 } else {
1606 memset(&old, 0, sizeof(old));
1607 old.id = id;
1608 }
1609
1610 if (!mem->memory_size)
1611 return kvm_delete_memslot(kvm, mem, &old, as_id);
1612
1613 new.as_id = as_id;
1614 new.id = id;
1615 new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1616 new.npages = mem->memory_size >> PAGE_SHIFT;
1617 new.flags = mem->flags;
1618 new.userspace_addr = mem->userspace_addr;
1619
1620 if (new.npages > KVM_MEM_MAX_NR_PAGES)
1621 return -EINVAL;
1622
1623 if (!old.npages) {
1624 change = KVM_MR_CREATE;
1625 new.dirty_bitmap = NULL;
1626 memset(&new.arch, 0, sizeof(new.arch));
1627 } else { /* Modify an existing slot. */
1628 if ((new.userspace_addr != old.userspace_addr) ||
1629 (new.npages != old.npages) ||
1630 ((new.flags ^ old.flags) & KVM_MEM_READONLY))
1631 return -EINVAL;
1632
1633 if (new.base_gfn != old.base_gfn)
1634 change = KVM_MR_MOVE;
1635 else if (new.flags != old.flags)
1636 change = KVM_MR_FLAGS_ONLY;
1637 else /* Nothing to change. */
1638 return 0;
1639
1640 /* Copy dirty_bitmap and arch from the current memslot. */
1641 new.dirty_bitmap = old.dirty_bitmap;
1642 memcpy(&new.arch, &old.arch, sizeof(new.arch));
1643 }
1644
1645 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
1646 /* Check for overlaps */
1647 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
1648 if (tmp->id == id)
1649 continue;
1650 if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
1651 (new.base_gfn >= tmp->base_gfn + tmp->npages)))
1652 return -EEXIST;
1653 }
1654 }
1655
1656 /* Allocate/free page dirty bitmap as needed */
1657 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1658 new.dirty_bitmap = NULL;
1659 else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
1660 r = kvm_alloc_dirty_bitmap(&new);
1661 if (r)
1662 return r;
1663
1664 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1665 bitmap_set(new.dirty_bitmap, 0, new.npages);
1666 }
1667
1668 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
1669 if (r)
1670 goto out_bitmap;
1671
1672 if (old.dirty_bitmap && !new.dirty_bitmap)
1673 kvm_destroy_dirty_bitmap(&old);
1674 return 0;
1675
1676out_bitmap:
1677 if (new.dirty_bitmap && !old.dirty_bitmap)
1678 kvm_destroy_dirty_bitmap(&new);
1679 return r;
1680}
1681EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1682
1683int kvm_set_memory_region(struct kvm *kvm,
1684 const struct kvm_userspace_memory_region *mem)
1685{
1686 int r;
1687
1688 mutex_lock(&kvm->slots_lock);
1689 r = __kvm_set_memory_region(kvm, mem);
1690 mutex_unlock(&kvm->slots_lock);
1691 return r;
1692}
1693EXPORT_SYMBOL_GPL(kvm_set_memory_region);
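/*
 * Editor's sketch (hypothetical caller, not in the original file): creating
 * a 1 MiB dirty-logged slot via the API above. The slot field packs the
 * address space id in the upper 16 bits and the slot id in the lower 16,
 * mirroring the decoding in __kvm_set_memory_region().
 */
static int __maybe_unused kvm_example_create_slot(struct kvm *kvm,
						  unsigned long uaddr)
{
	struct kvm_userspace_memory_region mem = {
		.slot = (0 << 16) | 5,		/* as_id 0, slot id 5 */
		.flags = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = 0x100000,	/* page-aligned GPA */
		.memory_size = 0x100000,	/* 1 MiB, page aligned */
		.userspace_addr = uaddr,	/* page-aligned HVA */
	};

	return kvm_set_memory_region(kvm, &mem);
}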
1694
1695static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1696 struct kvm_userspace_memory_region *mem)
1697{
1698 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1699 return -EINVAL;
1700
1701 return kvm_set_memory_region(kvm, mem);
1702}
1703
1704#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1705/**
1706 * kvm_get_dirty_log - get a snapshot of dirty pages
1707 * @kvm: pointer to kvm instance
1708 * @log: slot id and address to which we copy the log
1709 * @is_dirty: set to '1' if any dirty pages were found
1710 * @memslot: set to the associated memslot, always valid on success
1711 */
1712int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1713 int *is_dirty, struct kvm_memory_slot **memslot)
1714{
1715 struct kvm_memslots *slots;
1716 int i, as_id, id;
1717 unsigned long n;
1718 unsigned long any = 0;
1719
1720 /* Dirty ring tracking is exclusive to dirty log tracking */
1721 if (kvm->dirty_ring_size)
1722 return -ENXIO;
1723
1724 *memslot = NULL;
1725 *is_dirty = 0;
1726
1727 as_id = log->slot >> 16;
1728 id = (u16)log->slot;
1729 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1730 return -EINVAL;
1731
1732 slots = __kvm_memslots(kvm, as_id);
1733 *memslot = id_to_memslot(slots, id);
1734 if (!(*memslot) || !(*memslot)->dirty_bitmap)
1735 return -ENOENT;
1736
1737 kvm_arch_sync_dirty_log(kvm, *memslot);
1738
1739 n = kvm_dirty_bitmap_bytes(*memslot);
1740
1741 for (i = 0; !any && i < n/sizeof(long); ++i)
1742 any = (*memslot)->dirty_bitmap[i];
1743
1744 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
1745 return -EFAULT;
1746
1747 if (any)
1748 *is_dirty = 1;
1749 return 0;
1750}
1751EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
1752
1753#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1754/**
1755 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
1756 * and reenable dirty page tracking for the corresponding pages.
1757 * @kvm: pointer to kvm instance
1758 * @log: slot id and address to which we copy the log
1759 *
1760 * Keep in mind that VCPU threads can write to the bitmap
1761 * concurrently; to avoid losing track of dirty pages, we keep the
1762 * following order:
1763 *
1764 * 1. Take a snapshot of the bit and clear it if needed.
1765 * 2. Write protect the corresponding page.
1766 * 3. Copy the snapshot to the userspace.
1767 * 4. Upon return caller flushes TLB's if needed.
1768 *
1769 * Between 2 and 4, the guest may write to the page using the remaining TLB
1770 * entry. This is not a problem because the page is reported dirty using
1771 * the snapshot taken before and step 4 ensures that writes done after
1772 * exiting to userspace will be logged for the next call.
1773 *
1774 */
1775static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
1776{
1777 struct kvm_memslots *slots;
1778 struct kvm_memory_slot *memslot;
1779 int i, as_id, id;
1780 unsigned long n;
1781 unsigned long *dirty_bitmap;
1782 unsigned long *dirty_bitmap_buffer;
1783 bool flush;
1784
1785 /* Dirty ring tracking is exclusive to dirty log tracking */
1786 if (kvm->dirty_ring_size)
1787 return -ENXIO;
1788
1789 as_id = log->slot >> 16;
1790 id = (u16)log->slot;
1791 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1792 return -EINVAL;
1793
1794 slots = __kvm_memslots(kvm, as_id);
1795 memslot = id_to_memslot(slots, id);
1796 if (!memslot || !memslot->dirty_bitmap)
1797 return -ENOENT;
1798
1799 dirty_bitmap = memslot->dirty_bitmap;
1800
1801 kvm_arch_sync_dirty_log(kvm, memslot);
1802
1803 n = kvm_dirty_bitmap_bytes(memslot);
1804 flush = false;
1805 if (kvm->manual_dirty_log_protect) {
1806 /*
1807 * Unlike kvm_get_dirty_log, we always return false in *flush,
1808 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
1809 * is some code duplication between this function and
1810 * kvm_get_dirty_log, but hopefully all architectures will
1811 * transition to kvm_get_dirty_log_protect so that kvm_get_dirty_log
1812 * can eventually be eliminated.
1813 */
1814 dirty_bitmap_buffer = dirty_bitmap;
1815 } else {
1816 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1817 memset(dirty_bitmap_buffer, 0, n);
1818
1819 KVM_MMU_LOCK(kvm);
1820 for (i = 0; i < n / sizeof(long); i++) {
1821 unsigned long mask;
1822 gfn_t offset;
1823
1824 if (!dirty_bitmap[i])
1825 continue;
1826
1827 flush = true;
1828 mask = xchg(&dirty_bitmap[i], 0);
1829 dirty_bitmap_buffer[i] = mask;
1830
1831 offset = i * BITS_PER_LONG;
1832 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1833 offset, mask);
1834 }
1835 KVM_MMU_UNLOCK(kvm);
1836 }
1837
1838 if (flush)
1839 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1840
1841 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
1842 return -EFAULT;
1843 return 0;
1844}
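/*
 * Editor's sketch of the matching userspace side (hypothetical, not part of
 * this file): fetching the bitmap for slot 5 of address space 0 with the
 * KVM_GET_DIRTY_LOG ioctl that ultimately lands here.
 *
 *	struct kvm_dirty_log log = {
 *		.slot = (0 << 16) | 5,
 *		.dirty_bitmap = bitmap,	// kvm_dirty_bitmap_bytes() bytes
 *	};
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log))
 *		err(1, "KVM_GET_DIRTY_LOG");
 */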
1845
1846
1847/**
1848 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
1849 * @kvm: kvm instance
1850 * @log: slot id and address to which we copy the log
1851 *
1852 * Steps 1-4 below provide general overview of dirty page logging. See
1853 * kvm_get_dirty_log_protect() function description for additional details.
1854 *
1855 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
1856 * always flush the TLB (step 4) even if a previous step failed and the dirty
1857 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
1858 * API does not preclude subsequent dirty log reads by user space. Flushing
1859 * the TLB ensures writes will be marked dirty for the next log read.
1860 *
1861 * 1. Take a snapshot of the bit and clear it if needed.
1862 * 2. Write protect the corresponding page.
1863 * 3. Copy the snapshot to the userspace.
1864 * 4. Flush TLB's if needed.
1865 */
1866static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1867 struct kvm_dirty_log *log)
1868{
1869 int r;
1870
1871 mutex_lock(&kvm->slots_lock);
1872
1873 r = kvm_get_dirty_log_protect(kvm, log);
1874
1875 mutex_unlock(&kvm->slots_lock);
1876 return r;
1877}
1878
1879/**
1880 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
1881 * and reenable dirty page tracking for the corresponding pages.
1882 * @kvm: pointer to kvm instance
1883 * @log: slot id and address from which to fetch the bitmap of dirty pages
1884 */
1885static int kvm_clear_dirty_log_protect(struct kvm *kvm,
1886 struct kvm_clear_dirty_log *log)
1887{
1888 struct kvm_memslots *slots;
1889 struct kvm_memory_slot *memslot;
1890 int as_id, id;
1891 gfn_t offset;
1892 unsigned long i, n;
1893 unsigned long *dirty_bitmap;
1894 unsigned long *dirty_bitmap_buffer;
1895 bool flush;
1896
1897 /* Dirty ring tracking is exclusive to dirty log tracking */
1898 if (kvm->dirty_ring_size)
1899 return -ENXIO;
1900
1901 as_id = log->slot >> 16;
1902 id = (u16)log->slot;
1903 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1904 return -EINVAL;
1905
1906 if (log->first_page & 63)
1907 return -EINVAL;
1908
1909 slots = __kvm_memslots(kvm, as_id);
1910 memslot = id_to_memslot(slots, id);
1911 if (!memslot || !memslot->dirty_bitmap)
1912 return -ENOENT;
1913
1914 dirty_bitmap = memslot->dirty_bitmap;
1915
1916 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
1917
1918 if (log->first_page > memslot->npages ||
1919 log->num_pages > memslot->npages - log->first_page ||
1920 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
1921 return -EINVAL;
1922
1923 kvm_arch_sync_dirty_log(kvm, memslot);
1924
1925 flush = false;
1926 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1927 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
1928 return -EFAULT;
1929
1930 KVM_MMU_LOCK(kvm);
1931 for (offset = log->first_page, i = offset / BITS_PER_LONG,
1932 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
1933 i++, offset += BITS_PER_LONG) {
1934 unsigned long mask = *dirty_bitmap_buffer++;
1935 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
1936 if (!mask)
1937 continue;
1938
1939 mask &= atomic_long_fetch_andnot(mask, p);
1940
1941 /*
1942 * mask contains the bits that really have been cleared. This
1943 * never includes any bits beyond the length of the memslot (if
1944 * the length is not aligned to 64 pages), therefore it is not
1945 * a problem if userspace sets them in log->dirty_bitmap.
1946 */
1947 if (mask) {
1948 flush = true;
1949 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1950 offset, mask);
1951 }
1952 }
1953 KVM_MMU_UNLOCK(kvm);
1954
1955 if (flush)
1956 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1957
1958 return 0;
1959}
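/*
 * Worked example of the mask arithmetic above (editor's addition): if
 * dirty_bitmap[i] == 0b1010 and userspace asks to clear 0b0110, the
 * atomic_long_fetch_andnot() stores 0b1000 and returns the old 0b1010, so
 * mask becomes 0b0110 & 0b1010 == 0b0010: only the one page that was both
 * requested and actually dirty gets re-write-protected.
 */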
1960
1961static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
1962 struct kvm_clear_dirty_log *log)
1963{
1964 int r;
1965
1966 mutex_lock(&kvm->slots_lock);
1967
1968 r = kvm_clear_dirty_log_protect(kvm, log);
1969
1970 mutex_unlock(&kvm->slots_lock);
1971 return r;
1972}
1973#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1974
1975struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1976{
1977 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1978}
1979EXPORT_SYMBOL_GPL(gfn_to_memslot);
1980
1981struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1982{
1983 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1984}
1985EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
1986
1987bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1988{
1989 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1990
1991 return kvm_is_visible_memslot(memslot);
1992}
1993EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1994
1995bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1996{
1997 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1998
1999 return kvm_is_visible_memslot(memslot);
2000}
2001EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2002
2003unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2004{
2005 struct vm_area_struct *vma;
2006 unsigned long addr, size;
2007
2008 size = PAGE_SIZE;
2009
2010 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2011 if (kvm_is_error_hva(addr))
2012 return PAGE_SIZE;
2013
2014 mmap_read_lock(current->mm);
2015 vma = find_vma(current->mm, addr);
2016 if (!vma)
2017 goto out;
2018
2019 size = vma_kernel_pagesize(vma);
2020
2021out:
2022 mmap_read_unlock(current->mm);
2023
2024 return size;
2025}
2026
2027static bool memslot_is_readonly(struct kvm_memory_slot *slot)
2028{
2029 return slot->flags & KVM_MEM_READONLY;
2030}
2031
2032static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2033 gfn_t *nr_pages, bool write)
2034{
2035 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2036 return KVM_HVA_ERR_BAD;
2037
2038 if (memslot_is_readonly(slot) && write)
2039 return KVM_HVA_ERR_RO_BAD;
2040
2041 if (nr_pages)
2042 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2043
2044 return __gfn_to_hva_memslot(slot, gfn);
2045}
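/*
 * Translation example (editor's addition): __gfn_to_hva_memslot() computes
 * hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE, so for a
 * slot with base_gfn == 0x100 and userspace_addr == 0x7f0000000000, gfn
 * 0x123 maps to hva 0x7f0000000000 + 0x23 * 0x1000 == 0x7f0000023000.
 */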
2046
2047static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2048 gfn_t *nr_pages)
2049{
2050 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2051}
2052
2053unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2054 gfn_t gfn)
2055{
2056 return gfn_to_hva_many(slot, gfn, NULL);
2057}
2058EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2059
2060unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2061{
2062 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2063}
2064EXPORT_SYMBOL_GPL(gfn_to_hva);
2065
2066unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2067{
2068 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2069}
2070EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2071
2072/*
2073 * Return the hva of a @gfn and the R/W attribute if possible.
2074 *
2075 * @slot: the kvm_memory_slot which contains @gfn
2076 * @gfn: the gfn to be translated
2077 * @writable: used to return the read/write attribute of the @slot if the hva
2078 * is valid and @writable is not NULL
2079 */
2080unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2081 gfn_t gfn, bool *writable)
2082{
2083 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2084
2085 if (!kvm_is_error_hva(hva) && writable)
2086 *writable = !memslot_is_readonly(slot);
2087
2088 return hva;
2089}
2090
2091unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2092{
2093 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2094
2095 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2096}
2097
2098unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2099{
2100 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2101
2102 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2103}
2104
2105static inline int check_user_page_hwpoison(unsigned long addr)
2106{
2107 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2108
2109 rc = get_user_pages(addr, 1, flags, NULL, NULL);
2110 return rc == -EHWPOISON;
2111}
2112
2113/*
2114 * The fast path to get the writable pfn, which will be stored in @pfn;
2115 * true indicates success, otherwise false is returned. It is also the
2116 * only path that may run in atomic context.
2117 */
2118static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2119 bool *writable, kvm_pfn_t *pfn)
2120{
2121 struct page *page[1];
2122
2123 /*
2124 * Fast pin a writable pfn only if it is a write fault request
2125 * or the caller allows mapping a writable pfn for a read fault
2126 * request.
2127 */
2128 if (!(write_fault || writable))
2129 return false;
2130
2131 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2132 *pfn = page_to_pfn(page[0]);
2133
2134 if (writable)
2135 *writable = true;
2136 return true;
2137 }
2138
2139 return false;
2140}
2141
2142/*
2143 * The slow path to get the pfn of the specified host virtual address,
2144 * 1 indicates success, -errno is returned if error is detected.
2145 */
2146static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2147 bool *writable, kvm_pfn_t *pfn)
2148{
2149 unsigned int flags = FOLL_HWPOISON;
2150 struct page *page;
2151 int npages = 0;
2152
2153 might_sleep();
2154
2155 if (writable)
2156 *writable = write_fault;
2157
2158 if (write_fault)
2159 flags |= FOLL_WRITE;
2160 if (async)
2161 flags |= FOLL_NOWAIT;
2162
2163 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2164 if (npages != 1)
2165 return npages;
2166
2167 /* map read fault as writable if possible */
2168 if (unlikely(!write_fault) && writable) {
2169 struct page *wpage;
2170
2171 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2172 *writable = true;
2173 put_page(page);
2174 page = wpage;
2175 }
2176 }
2177 *pfn = page_to_pfn(page);
2178 return npages;
2179}
2180
2181static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2182{
2183 if (unlikely(!(vma->vm_flags & VM_READ)))
2184 return false;
2185
2186 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2187 return false;
2188
2189 return true;
2190}
2191
2192static int kvm_try_get_pfn(kvm_pfn_t pfn)
2193{
2194 if (kvm_is_reserved_pfn(pfn))
2195 return 1;
2196 return get_page_unless_zero(pfn_to_page(pfn));
2197}
2198
2199static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2200 unsigned long addr, bool *async,
2201 bool write_fault, bool *writable,
2202 kvm_pfn_t *p_pfn)
2203{
2204 kvm_pfn_t pfn;
2205 pte_t *ptep;
2206 spinlock_t *ptl;
2207 int r;
2208
2209 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2210 if (r) {
2211 /*
2212 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2213 * not call the fault handler, so do it here.
2214 */
2215 bool unlocked = false;
2216 r = fixup_user_fault(current->mm, addr,
2217 (write_fault ? FAULT_FLAG_WRITE : 0),
2218 &unlocked);
2219 if (unlocked)
2220 return -EAGAIN;
2221 if (r)
2222 return r;
2223
2224 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2225 if (r)
2226 return r;
2227 }
2228
2229 if (write_fault && !pte_write(*ptep)) {
2230 pfn = KVM_PFN_ERR_RO_FAULT;
2231 goto out;
2232 }
2233
2234 if (writable)
2235 *writable = pte_write(*ptep);
2236 pfn = pte_pfn(*ptep);
2237
2238 /*
2239 * Get a reference here because callers of *hva_to_pfn* and
2240 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2241 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
2242 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
2243 * simply do nothing for reserved pfns.
2244 *
2245 * Whoever called remap_pfn_range is also going to call e.g.
2246 * unmap_mapping_range before the underlying pages are freed,
2247 * causing a call to our MMU notifier.
2248 *
2249 * Certain IO or PFNMAP mappings can be backed with valid
2250 * struct pages, but be allocated without refcounting e.g.,
2251 * tail pages of non-compound higher order allocations, which
2252 * would then underflow the refcount when the caller does the
2253 * required put_page. Don't allow those pages here.
2254 */
2255 if (!kvm_try_get_pfn(pfn))
2256 r = -EFAULT;
2257
2258out:
2259 pte_unmap_unlock(ptep, ptl);
2260 *p_pfn = pfn;
2261
2262 return r;
2263}
2264
2265/*
2266 * Pin guest page in memory and return its pfn.
2267 * @addr: host virtual address which maps memory to the guest
2268 * @atomic: if true, the function must not sleep (only the fast path is tried)
2269 * @async: whether this function needs to wait for IO to complete if the
2270 *         host page is not in memory
2271 * @write_fault: whether we should get a writable host page
2272 * @writable: whether mapping a writable host page for !@write_fault is allowed
2273 *
2274 * The function will map a writable host page for these two cases:
2275 * 1): @write_fault = true
2276 * 2): @write_fault = false && @writable, @writable will tell the caller
2277 * whether the mapping is writable.
2278 */
2279static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2280 bool write_fault, bool *writable)
2281{
2282 struct vm_area_struct *vma;
2283 kvm_pfn_t pfn = 0;
2284 int npages, r;
2285
2286 /* we can do it either atomically or asynchronously, not both */
2287 BUG_ON(atomic && async);
2288
2289 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2290 return pfn;
2291
2292 if (atomic)
2293 return KVM_PFN_ERR_FAULT;
2294
2295 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2296 if (npages == 1)
2297 return pfn;
2298
2299 mmap_read_lock(current->mm);
2300 if (npages == -EHWPOISON ||
2301 (!async && check_user_page_hwpoison(addr))) {
2302 pfn = KVM_PFN_ERR_HWPOISON;
2303 goto exit;
2304 }
2305
2306retry:
2307 vma = vma_lookup(current->mm, addr);
2308
2309 if (vma == NULL)
2310 pfn = KVM_PFN_ERR_FAULT;
2311 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2312 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
2313 if (r == -EAGAIN)
2314 goto retry;
2315 if (r < 0)
2316 pfn = KVM_PFN_ERR_FAULT;
2317 } else {
2318 if (async && vma_is_valid(vma, write_fault))
2319 *async = true;
2320 pfn = KVM_PFN_ERR_FAULT;
2321 }
2322exit:
2323 mmap_read_unlock(current->mm);
2324 return pfn;
2325}
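/*
 * Usage sketch for case 2) of the comment above (editor's addition,
 * hypothetical caller): a read fault that opportunistically maps the page
 * writable when the host PTE permits it.
 *
 *	bool writable;
 *	kvm_pfn_t pfn = hva_to_pfn(addr, false, NULL, false, &writable);
 *	// on success, writable reports whether the mapping may be written
 */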
2326
2327kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
2328 bool atomic, bool *async, bool write_fault,
2329 bool *writable, hva_t *hva)
2330{
2331 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2332
2333 if (hva)
2334 *hva = addr;
2335
2336 if (addr == KVM_HVA_ERR_RO_BAD) {
2337 if (writable)
2338 *writable = false;
2339 return KVM_PFN_ERR_RO_FAULT;
2340 }
2341
2342 if (kvm_is_error_hva(addr)) {
2343 if (writable)
2344 *writable = false;
2345 return KVM_PFN_NOSLOT;
2346 }
2347
2348 /* Do not map writable pfn in the readonly memslot. */
2349 if (writable && memslot_is_readonly(slot)) {
2350 *writable = false;
2351 writable = NULL;
2352 }
2353
2354 return hva_to_pfn(addr, atomic, async, write_fault,
2355 writable);
2356}
2357EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2358
2359kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2360 bool *writable)
2361{
2362 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2363 write_fault, writable, NULL);
2364}
2365EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2366
2367kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
2368{
2369 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
2370}
2371EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2372
2373kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
2374{
2375 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
2376}
2377EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2378
2379kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2380{
2381 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2382}
2383EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2384
2385kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2386{
2387 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2388}
2389EXPORT_SYMBOL_GPL(gfn_to_pfn);
2390
2391kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2392{
2393 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2394}
2395EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2396
2397int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2398 struct page **pages, int nr_pages)
2399{
2400 unsigned long addr;
2401 gfn_t entry = 0;
2402
2403 addr = gfn_to_hva_many(slot, gfn, &entry);
2404 if (kvm_is_error_hva(addr))
2405 return -1;
2406
2407 if (entry < nr_pages)
2408 return 0;
2409
2410 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2411}
2412EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2413
2414static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
2415{
2416 if (is_error_noslot_pfn(pfn))
2417 return KVM_ERR_PTR_BAD_PAGE;
2418
2419 if (kvm_is_reserved_pfn(pfn)) {
2420 WARN_ON(1);
2421 return KVM_ERR_PTR_BAD_PAGE;
2422 }
2423
2424 return pfn_to_page(pfn);
2425}
2426
2427struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2428{
2429 kvm_pfn_t pfn;
2430
2431 pfn = gfn_to_pfn(kvm, gfn);
2432
2433 return kvm_pfn_to_page(pfn);
2434}
2435EXPORT_SYMBOL_GPL(gfn_to_page);
2436
2437void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
2438{
2439 if (pfn == 0)
2440 return;
2441
2442 if (cache)
2443 cache->pfn = cache->gfn = 0;
2444
2445 if (dirty)
2446 kvm_release_pfn_dirty(pfn);
2447 else
2448 kvm_release_pfn_clean(pfn);
2449}
2450
2451static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
2452 struct gfn_to_pfn_cache *cache, u64 gen)
2453{
2454 kvm_release_pfn(cache->pfn, cache->dirty, cache);
2455
2456 cache->pfn = gfn_to_pfn_memslot(slot, gfn);
2457 cache->gfn = gfn;
2458 cache->dirty = false;
2459 cache->generation = gen;
2460}
2461
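/*
 * Map @gfn into kernel address space.  A generation-matching cache
 * entry is reused when supplied; atomic callers need a warm cache and
 * use kmap_atomic().  Non-struct-page (IOMEM) pfns are mapped with
 * memremap() on the non-atomic path only.
 */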
2462static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
2463 struct kvm_host_map *map,
2464 struct gfn_to_pfn_cache *cache,
2465 bool atomic)
2466{
2467 kvm_pfn_t pfn;
2468 void *hva = NULL;
2469 struct page *page = KVM_UNMAPPED_PAGE;
2470 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
2471 u64 gen = slots->generation;
2472
2473 if (!map)
2474 return -EINVAL;
2475
2476 if (cache) {
2477 if (!cache->pfn || cache->gfn != gfn ||
2478 cache->generation != gen) {
2479 if (atomic)
2480 return -EAGAIN;
2481 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
2482 }
2483 pfn = cache->pfn;
2484 } else {
2485 if (atomic)
2486 return -EAGAIN;
2487 pfn = gfn_to_pfn_memslot(slot, gfn);
2488 }
2489 if (is_error_noslot_pfn(pfn))
2490 return -EINVAL;
2491
2492 if (pfn_valid(pfn)) {
2493 page = pfn_to_page(pfn);
2494 if (atomic)
2495 hva = kmap_atomic(page);
2496 else
2497 hva = kmap(page);
2498#ifdef CONFIG_HAS_IOMEM
2499 } else if (!atomic) {
2500 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2501 } else {
2502 return -EINVAL;
2503#endif
2504 }
2505
2506 if (!hva)
2507 return -EFAULT;
2508
2509 map->page = page;
2510 map->hva = hva;
2511 map->pfn = pfn;
2512 map->gfn = gfn;
2513
2514 return 0;
2515}
2516
2517int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
2518 struct gfn_to_pfn_cache *cache, bool atomic)
2519{
2520 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
2521 cache, atomic);
2522}
2523EXPORT_SYMBOL_GPL(kvm_map_gfn);
2524
2525int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2526{
2527 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
2528 NULL, false);
2529}
2530EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2531
2532static void __kvm_unmap_gfn(struct kvm *kvm,
2533 struct kvm_memory_slot *memslot,
2534 struct kvm_host_map *map,
2535 struct gfn_to_pfn_cache *cache,
2536 bool dirty, bool atomic)
2537{
2538 if (!map)
2539 return;
2540
2541 if (!map->hva)
2542 return;
2543
2544 if (map->page != KVM_UNMAPPED_PAGE) {
2545 if (atomic)
2546 kunmap_atomic(map->hva);
2547 else
2548 kunmap(map->page);
2549 }
2550#ifdef CONFIG_HAS_IOMEM
2551 else if (!atomic)
2552 memunmap(map->hva);
2553 else
2554 WARN_ONCE(1, "Unexpected unmapping in atomic context");
2555#endif
2556
2557 if (dirty)
2558 mark_page_dirty_in_slot(kvm, memslot, map->gfn);
2559
2560 if (cache)
2561 cache->dirty |= dirty;
2562 else
2563 kvm_release_pfn(map->pfn, dirty, NULL);
2564
2565 map->hva = NULL;
2566 map->page = NULL;
2567}
2568
2569int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
2570 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
2571{
2572 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
2573 cache, dirty, atomic);
2574 return 0;
2575}
2576EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
2577
2578void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2579{
2580 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
2581 map, NULL, dirty, false);
2582}
2583EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
2584
2585struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2586{
2587 kvm_pfn_t pfn;
2588
2589 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2590
2591 return kvm_pfn_to_page(pfn);
2592}
2593EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2594
2595void kvm_release_page_clean(struct page *page)
2596{
2597 WARN_ON(is_error_page(page));
2598
2599 kvm_release_pfn_clean(page_to_pfn(page));
2600}
2601EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2602
2603void kvm_release_pfn_clean(kvm_pfn_t pfn)
2604{
2605 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2606 put_page(pfn_to_page(pfn));
2607}
2608EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2609
2610void kvm_release_page_dirty(struct page *page)
2611{
2612 WARN_ON(is_error_page(page));
2613
2614 kvm_release_pfn_dirty(page_to_pfn(page));
2615}
2616EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2617
2618void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2619{
2620 kvm_set_pfn_dirty(pfn);
2621 kvm_release_pfn_clean(pfn);
2622}
2623EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2624
2625void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2626{
2627 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2628 SetPageDirty(pfn_to_page(pfn));
2629}
2630EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2631
2632void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2633{
2634 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2635 mark_page_accessed(pfn_to_page(pfn));
2636}
2637EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2638
2639void kvm_get_pfn(kvm_pfn_t pfn)
2640{
2641 if (!kvm_is_reserved_pfn(pfn))
2642 get_page(pfn_to_page(pfn));
2643}
2644EXPORT_SYMBOL_GPL(kvm_get_pfn);
2645
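/*
 * Bytes that can be copied before the next page boundary, capped at the
 * remaining length.  E.g. with 4 KiB pages, offset 4092 and len 16
 * produce segments of 4 and then 12 bytes.
 */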
2646static int next_segment(unsigned long len, int offset)
2647{
2648 if (len > PAGE_SIZE - offset)
2649 return PAGE_SIZE - offset;
2650 else
2651 return len;
2652}
2653
2654static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2655 void *data, int offset, int len)
2656{
2657 int r;
2658 unsigned long addr;
2659
2660 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2661 if (kvm_is_error_hva(addr))
2662 return -EFAULT;
2663 r = __copy_from_user(data, (void __user *)addr + offset, len);
2664 if (r)
2665 return -EFAULT;
2666 return 0;
2667}
2668
2669int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2670 int len)
2671{
2672 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2673
2674 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2675}
2676EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2677
2678int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2679 int offset, int len)
2680{
2681 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2682
2683 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2684}
2685EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2686
2687int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2688{
2689 gfn_t gfn = gpa >> PAGE_SHIFT;
2690 int seg;
2691 int offset = offset_in_page(gpa);
2692 int ret;
2693
2694 while ((seg = next_segment(len, offset)) != 0) {
2695 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2696 if (ret < 0)
2697 return ret;
2698 offset = 0;
2699 len -= seg;
2700 data += seg;
2701 ++gfn;
2702 }
2703 return 0;
2704}
2705EXPORT_SYMBOL_GPL(kvm_read_guest);
2706
2707int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
2708{
2709 gfn_t gfn = gpa >> PAGE_SHIFT;
2710 int seg;
2711 int offset = offset_in_page(gpa);
2712 int ret;
2713
2714 while ((seg = next_segment(len, offset)) != 0) {
2715 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2716 if (ret < 0)
2717 return ret;
2718 offset = 0;
2719 len -= seg;
2720 data += seg;
2721 ++gfn;
2722 }
2723 return 0;
2724}
2725EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
2726
2727static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2728 void *data, int offset, unsigned long len)
2729{
2730 int r;
2731 unsigned long addr;
2732
2733 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2734 if (kvm_is_error_hva(addr))
2735 return -EFAULT;
2736 pagefault_disable();
2737 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
2738 pagefault_enable();
2739 if (r)
2740 return -EFAULT;
2741 return 0;
2742}
2743
2744int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2745 void *data, unsigned long len)
2746{
2747 gfn_t gfn = gpa >> PAGE_SHIFT;
2748 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2749 int offset = offset_in_page(gpa);
2750
2751 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2752}
2753EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2754
2755static int __kvm_write_guest_page(struct kvm *kvm,
2756 struct kvm_memory_slot *memslot, gfn_t gfn,
2757 const void *data, int offset, int len)
2758{
2759 int r;
2760 unsigned long addr;
2761
2762 addr = gfn_to_hva_memslot(memslot, gfn);
2763 if (kvm_is_error_hva(addr))
2764 return -EFAULT;
2765 r = __copy_to_user((void __user *)addr + offset, data, len);
2766 if (r)
2767 return -EFAULT;
2768 mark_page_dirty_in_slot(kvm, memslot, gfn);
2769 return 0;
2770}
2771
2772int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2773 const void *data, int offset, int len)
2774{
2775 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2776
2777 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
2778}
2779EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2780
2781int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2782 const void *data, int offset, int len)
2783{
2784 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2785
2786 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
2787}
2788EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2789
2790int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2791 unsigned long len)
2792{
2793 gfn_t gfn = gpa >> PAGE_SHIFT;
2794 int seg;
2795 int offset = offset_in_page(gpa);
2796 int ret;
2797
2798 while ((seg = next_segment(len, offset)) != 0) {
2799 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2800 if (ret < 0)
2801 return ret;
2802 offset = 0;
2803 len -= seg;
2804 data += seg;
2805 ++gfn;
2806 }
2807 return 0;
2808}
2809EXPORT_SYMBOL_GPL(kvm_write_guest);
2810
2811int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2812 unsigned long len)
2813{
2814 gfn_t gfn = gpa >> PAGE_SHIFT;
2815 int seg;
2816 int offset = offset_in_page(gpa);
2817 int ret;
2818
2819 while ((seg = next_segment(len, offset)) != 0) {
2820 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2821 if (ret < 0)
2822 return ret;
2823 offset = 0;
2824 len -= seg;
2825 data += seg;
2826 ++gfn;
2827 }
2828 return 0;
2829}
2830EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
2831
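/*
 * Resolve and cache the hva for @gpa/@len.  ghc->generation is stamped
 * with the current memslots generation so users can detect stale
 * translations; a region spanning more than one page leaves
 * ghc->memslot NULL, which forces the cached readers/writers onto the
 * per-page slow path.
 */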
2832static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2833 struct gfn_to_hva_cache *ghc,
2834 gpa_t gpa, unsigned long len)
2835{
2836 int offset = offset_in_page(gpa);
2837 gfn_t start_gfn = gpa >> PAGE_SHIFT;
2838 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2839 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2840 gfn_t nr_pages_avail;
2841
2842 /* Update ghc->generation before performing any error checks. */
2843 ghc->generation = slots->generation;
2844
2845 if (start_gfn > end_gfn) {
2846 ghc->hva = KVM_HVA_ERR_BAD;
2847 return -EINVAL;
2848 }
2849
2850 /*
2851 * If the requested region crosses two memslots, we still
2852 * verify that the entire region is valid here.
2853 */
2854 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
2855 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2856 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2857 &nr_pages_avail);
2858 if (kvm_is_error_hva(ghc->hva))
2859 return -EFAULT;
2860 }
2861
2862 /* Use the slow path for cross page reads and writes. */
2863 if (nr_pages_needed == 1)
2864 ghc->hva += offset;
2865 else
2866 ghc->memslot = NULL;
2867
2868 ghc->gpa = gpa;
2869 ghc->len = len;
2870 return 0;
2871}
2872
2873int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2874 gpa_t gpa, unsigned long len)
2875{
2876 struct kvm_memslots *slots = kvm_memslots(kvm);
2877 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2878}
2879EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
2880
2881int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2882 void *data, unsigned int offset,
2883 unsigned long len)
2884{
2885 struct kvm_memslots *slots = kvm_memslots(kvm);
2886 int r;
2887 gpa_t gpa = ghc->gpa + offset;
2888
2889 BUG_ON(len + offset > ghc->len);
2890
2891 if (slots->generation != ghc->generation) {
2892 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2893 return -EFAULT;
2894 }
2895
2896 if (kvm_is_error_hva(ghc->hva))
2897 return -EFAULT;
2898
2899 if (unlikely(!ghc->memslot))
2900 return kvm_write_guest(kvm, gpa, data, len);
2901
2902 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2903 if (r)
2904 return -EFAULT;
2905 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
2906
2907 return 0;
2908}
2909EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
2910
2911int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2912 void *data, unsigned long len)
2913{
2914 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
2915}
2916EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
2917
2918int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2919 void *data, unsigned int offset,
2920 unsigned long len)
2921{
2922 struct kvm_memslots *slots = kvm_memslots(kvm);
2923 int r;
2924 gpa_t gpa = ghc->gpa + offset;
2925
2926 BUG_ON(len + offset > ghc->len);
2927
2928 if (slots->generation != ghc->generation) {
2929 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2930 return -EFAULT;
2931 }
2932
2933 if (kvm_is_error_hva(ghc->hva))
2934 return -EFAULT;
2935
2936 if (unlikely(!ghc->memslot))
2937 return kvm_read_guest(kvm, gpa, data, len);
2938
2939 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
2940 if (r)
2941 return -EFAULT;
2942
2943 return 0;
2944}
2945EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
2946
2947int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2948 void *data, unsigned long len)
2949{
2950 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
2951}
2952EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
2953
2954int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2955{
2956 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2957 gfn_t gfn = gpa >> PAGE_SHIFT;
2958 int seg;
2959 int offset = offset_in_page(gpa);
2960 int ret;
2961
2962 while ((seg = next_segment(len, offset)) != 0) {
2963 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
2964 if (ret < 0)
2965 return ret;
2966 offset = 0;
2967 len -= seg;
2968 ++gfn;
2969 }
2970 return 0;
2971}
2972EXPORT_SYMBOL_GPL(kvm_clear_guest);
2973
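/*
 * Record a guest write: when dirty tracking is enabled on the slot, the
 * gfn is either pushed onto the dirty ring (if the VM uses one) or set
 * in the memslot's dirty bitmap.
 */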
2974void mark_page_dirty_in_slot(struct kvm *kvm,
2975 struct kvm_memory_slot *memslot,
2976 gfn_t gfn)
2977{
2978 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
2979 unsigned long rel_gfn = gfn - memslot->base_gfn;
2980 u32 slot = (memslot->as_id << 16) | memslot->id;
2981
2982 if (kvm->dirty_ring_size)
2983 kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
2984 slot, rel_gfn);
2985 else
2986 set_bit_le(rel_gfn, memslot->dirty_bitmap);
2987 }
2988}
2989EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
2990
2991void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2992{
2993 struct kvm_memory_slot *memslot;
2994
2995 memslot = gfn_to_memslot(kvm, gfn);
2996 mark_page_dirty_in_slot(kvm, memslot, gfn);
2997}
2998EXPORT_SYMBOL_GPL(mark_page_dirty);
2999
3000void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3001{
3002 struct kvm_memory_slot *memslot;
3003
3004 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3005 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3006}
3007EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3008
3009void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3010{
3011 if (!vcpu->sigset_active)
3012 return;
3013
3014 /*
3015 * This does a lockless modification of ->real_blocked, which is fine
3016 	 * because only current can change ->real_blocked, and all readers of
3017 	 * ->real_blocked don't care as long as ->real_blocked is always a
3018 	 * subset of ->blocked.
3019 */
3020 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3021}
3022
3023void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3024{
3025 if (!vcpu->sigset_active)
3026 return;
3027
3028 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3029 	sigemptyset(&current->real_blocked);
3030}
3031
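/*
 * grow_halt_poll_ns() multiplies the polling window by
 * halt_poll_ns_grow, bounded below by halt_poll_ns_grow_start and above
 * by the per-VM maximum; shrink_halt_poll_ns() divides it by
 * halt_poll_ns_shrink, dropping straight to zero when shrinking is
 * disabled or the result would fall below the grow-start value.
 */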
3032static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3033{
3034 unsigned int old, val, grow, grow_start;
3035
3036 old = val = vcpu->halt_poll_ns;
3037 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3038 grow = READ_ONCE(halt_poll_ns_grow);
3039 if (!grow)
3040 goto out;
3041
3042 val *= grow;
3043 if (val < grow_start)
3044 val = grow_start;
3045
3046 if (val > vcpu->kvm->max_halt_poll_ns)
3047 val = vcpu->kvm->max_halt_poll_ns;
3048
3049 vcpu->halt_poll_ns = val;
3050out:
3051 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3052}
3053
3054static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3055{
3056 unsigned int old, val, shrink, grow_start;
3057
3058 old = val = vcpu->halt_poll_ns;
3059 shrink = READ_ONCE(halt_poll_ns_shrink);
3060 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3061 if (shrink == 0)
3062 val = 0;
3063 else
3064 val /= shrink;
3065
3066 if (val < grow_start)
3067 val = 0;
3068
3069 vcpu->halt_poll_ns = val;
3070 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3071}
3072
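/*
 * Returns 0 if the vCPU should keep blocking, -EINTR once a wake
 * condition is pending: the vCPU became runnable (which also raises
 * KVM_REQ_UNHALT), a timer fired, a signal arrived, or KVM_REQ_UNBLOCK
 * was requested.
 */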
3073static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3074{
3075 int ret = -EINTR;
3076 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3077
3078 if (kvm_arch_vcpu_runnable(vcpu)) {
3079 kvm_make_request(KVM_REQ_UNHALT, vcpu);
3080 goto out;
3081 }
3082 if (kvm_cpu_has_pending_timer(vcpu))
3083 goto out;
3084 if (signal_pending(current))
3085 goto out;
3086 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3087 goto out;
3088
3089 ret = 0;
3090out:
3091 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3092 return ret;
3093}
3094
3095static inline void
3096update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
3097{
3098 if (waited)
3099 vcpu->stat.generic.halt_poll_fail_ns += poll_ns;
3100 else
3101 vcpu->stat.generic.halt_poll_success_ns += poll_ns;
3102}
3103
3104/*
3105 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
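 *
 * Poll for up to vcpu->halt_poll_ns for a wake condition first; if
 * polling fails, fall back to a schedule()-based wait, then grow or
 * shrink the polling window based on how long the block lasted.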
3106 */
3107void kvm_vcpu_block(struct kvm_vcpu *vcpu)
3108{
3109 ktime_t start, cur, poll_end;
3110 bool waited = false;
3111 u64 block_ns;
3112
3113 kvm_arch_vcpu_blocking(vcpu);
3114
3115 start = cur = poll_end = ktime_get();
3116 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
3117 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
3118
3119 ++vcpu->stat.generic.halt_attempted_poll;
3120 do {
3121 /*
3122 * This sets KVM_REQ_UNHALT if an interrupt
3123 * arrives.
3124 */
3125 if (kvm_vcpu_check_block(vcpu) < 0) {
3126 ++vcpu->stat.generic.halt_successful_poll;
3127 if (!vcpu_valid_wakeup(vcpu))
3128 ++vcpu->stat.generic.halt_poll_invalid;
3129 goto out;
3130 }
3131 cpu_relax();
3132 poll_end = cur = ktime_get();
3133 } while (kvm_vcpu_can_poll(cur, stop));
3134 }
3135
3136 prepare_to_rcuwait(&vcpu->wait);
3137 for (;;) {
3138 set_current_state(TASK_INTERRUPTIBLE);
3139
3140 if (kvm_vcpu_check_block(vcpu) < 0)
3141 break;
3142
3143 waited = true;
3144 schedule();
3145 }
3146 finish_rcuwait(&vcpu->wait);
3147 cur = ktime_get();
3148out:
3149 kvm_arch_vcpu_unblocking(vcpu);
3150 block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3151
3152 update_halt_poll_stats(
3153 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
3154
3155 if (!kvm_arch_no_poll(vcpu)) {
3156 if (!vcpu_valid_wakeup(vcpu)) {
3157 shrink_halt_poll_ns(vcpu);
3158 } else if (vcpu->kvm->max_halt_poll_ns) {
3159 if (block_ns <= vcpu->halt_poll_ns)
3160 ;
3161 /* we had a long block, shrink polling */
3162 else if (vcpu->halt_poll_ns &&
3163 block_ns > vcpu->kvm->max_halt_poll_ns)
3164 shrink_halt_poll_ns(vcpu);
3165 /* we had a short halt and our poll time is too small */
3166 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
3167 block_ns < vcpu->kvm->max_halt_poll_ns)
3168 grow_halt_poll_ns(vcpu);
3169 } else {
3170 vcpu->halt_poll_ns = 0;
3171 }
3172 }
3173
3174 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
3175 kvm_arch_vcpu_block_finish(vcpu);
3176}
3177EXPORT_SYMBOL_GPL(kvm_vcpu_block);
3178
3179bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3180{
3181 struct rcuwait *waitp;
3182
3183 waitp = kvm_arch_vcpu_get_wait(vcpu);
3184 if (rcuwait_wake_up(waitp)) {
3185 WRITE_ONCE(vcpu->ready, true);
3186 ++vcpu->stat.generic.halt_wakeup;
3187 return true;
3188 }
3189
3190 return false;
3191}
3192EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3193
3194#ifndef CONFIG_S390
3195/*
3196 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3197 */
3198void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3199{
3200 int me;
3201 int cpu = vcpu->cpu;
3202
3203 if (kvm_vcpu_wake_up(vcpu))
3204 return;
3205
3206 me = get_cpu();
3207 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3208 if (kvm_arch_vcpu_should_kick(vcpu))
3209 smp_send_reschedule(cpu);
3210 put_cpu();
3211}
3212EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3213#endif /* !CONFIG_S390 */
3214
3215int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3216{
3217 struct pid *pid;
3218 struct task_struct *task = NULL;
3219 int ret = 0;
3220
3221 rcu_read_lock();
3222 pid = rcu_dereference(target->pid);
3223 if (pid)
3224 task = get_pid_task(pid, PIDTYPE_PID);
3225 rcu_read_unlock();
3226 if (!task)
3227 return ret;
3228 ret = yield_to(task, 1);
3229 put_task_struct(task);
3230
3231 return ret;
3232}
3233EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3234
3235/*
3236 * Helper that checks whether a VCPU is eligible for directed yield.
3237 * Most eligible candidate to yield is decided by following heuristics:
3238 *
3239 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3240 * (preempted lock holder), indicated by @in_spin_loop.
3241 * Set at the beginning and cleared at the end of interception/PLE handler.
3242 *
3243 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
3244 * chance last time (mostly it has become eligible now since we have probably
3245 * yielded to lockholder in last iteration. This is done by toggling
3246 * @dy_eligible each time a VCPU checked for eligibility.)
3247 *
3248 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3249 * to preempted lock-holder could result in wrong VCPU selection and CPU
3250 * burning. Giving priority for a potential lock-holder increases lock
3251 * progress.
3252 *
3253 * Since algorithm is based on heuristics, accessing another VCPU data without
3254 * locking does not harm. It may result in trying to yield to same VCPU, fail
3255 * and continue with next VCPU and so on.
3256 */
3257static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3258{
3259#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3260 bool eligible;
3261
3262 eligible = !vcpu->spin_loop.in_spin_loop ||
3263 vcpu->spin_loop.dy_eligible;
3264
3265 if (vcpu->spin_loop.in_spin_loop)
3266 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3267
3268 return eligible;
3269#else
3270 return true;
3271#endif
3272}
3273
3274/*
3275 * Unlike kvm_arch_vcpu_runnable, this function is called outside
3276 * a vcpu_load/vcpu_put pair. However, for most architectures
3277 * kvm_arch_vcpu_runnable does not require vcpu_load.
3278 */
3279bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3280{
3281 return kvm_arch_vcpu_runnable(vcpu);
3282}
3283
3284static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3285{
3286 if (kvm_arch_dy_runnable(vcpu))
3287 return true;
3288
3289#ifdef CONFIG_KVM_ASYNC_PF
3290 if (!list_empty_careful(&vcpu->async_pf.done))
3291 return true;
3292#endif
3293
3294 return false;
3295}
3296
3297bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3298{
3299 return false;
3300}
3301
3302void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3303{
3304 struct kvm *kvm = me->kvm;
3305 struct kvm_vcpu *vcpu;
3306 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3307 int yielded = 0;
3308 int try = 3;
3309 int pass;
3310 int i;
3311
3312 kvm_vcpu_set_in_spin_loop(me, true);
3313 /*
3314 * We boost the priority of a VCPU that is runnable but not
3315 * currently running, because it got preempted by something
3316 * else and called schedule in __vcpu_run. Hopefully that
3317 * VCPU is holding the lock that we need and will release it.
3318 * We approximate round-robin by starting at the last boosted VCPU.
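	 * Candidates that are not ready, are ourselves, are sleeping while
	 * not dy-runnable, or fail the directed-yield eligibility check are
	 * skipped.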
3319 */
3320 for (pass = 0; pass < 2 && !yielded && try; pass++) {
3321 kvm_for_each_vcpu(i, vcpu, kvm) {
3322 if (!pass && i <= last_boosted_vcpu) {
3323 i = last_boosted_vcpu;
3324 continue;
3325 } else if (pass && i > last_boosted_vcpu)
3326 break;
3327 if (!READ_ONCE(vcpu->ready))
3328 continue;
3329 if (vcpu == me)
3330 continue;
3331 if (rcuwait_active(&vcpu->wait) &&
3332 !vcpu_dy_runnable(vcpu))
3333 continue;
3334 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3335 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3336 !kvm_arch_vcpu_in_kernel(vcpu))
3337 continue;
3338 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3339 continue;
3340
3341 yielded = kvm_vcpu_yield_to(vcpu);
3342 if (yielded > 0) {
3343 kvm->last_boosted_vcpu = i;
3344 break;
3345 } else if (yielded < 0) {
3346 try--;
3347 if (!try)
3348 break;
3349 }
3350 }
3351 }
3352 kvm_vcpu_set_in_spin_loop(me, false);
3353
3354 /* Ensure vcpu is not eligible during next spinloop */
3355 kvm_vcpu_set_dy_eligible(me, false);
3356}
3357EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3358
3359static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3360{
3361#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
3362 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3363 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3364 kvm->dirty_ring_size / PAGE_SIZE);
3365#else
3366 return false;
3367#endif
3368}
3369
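/*
 * Back the vcpu file's mmap: page 0 is the kvm_run structure, followed
 * (depending on configuration) by the x86 PIO data page, the coalesced
 * MMIO ring, and the dirty ring pages at KVM_DIRTY_LOG_PAGE_OFFSET.
 */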
3370static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3371{
3372 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3373 struct page *page;
3374
3375 if (vmf->pgoff == 0)
3376 page = virt_to_page(vcpu->run);
3377#ifdef CONFIG_X86
3378 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3379 page = virt_to_page(vcpu->arch.pio_data);
3380#endif
3381#ifdef CONFIG_KVM_MMIO
3382 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3383 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3384#endif
3385 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3386 page = kvm_dirty_ring_get_page(
3387 &vcpu->dirty_ring,
3388 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
3389 else
3390 return kvm_arch_vcpu_fault(vcpu, vmf);
3391 get_page(page);
3392 vmf->page = page;
3393 return 0;
3394}
3395
3396static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3397 .fault = kvm_vcpu_fault,
3398};
3399
3400static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3401{
3402 struct kvm_vcpu *vcpu = file->private_data;
3403 unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3404
3405 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3406 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3407 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3408 return -EINVAL;
3409
3410 vma->vm_ops = &kvm_vcpu_vm_ops;
3411 return 0;
3412}
3413
3414static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3415{
3416 struct kvm_vcpu *vcpu = filp->private_data;
3417
3418 kvm_put_kvm(vcpu->kvm);
3419 return 0;
3420}
3421
3422static struct file_operations kvm_vcpu_fops = {
3423 .release = kvm_vcpu_release,
3424 .unlocked_ioctl = kvm_vcpu_ioctl,
3425 .mmap = kvm_vcpu_mmap,
3426 .llseek = noop_llseek,
3427 KVM_COMPAT(kvm_vcpu_compat_ioctl),
3428};
3429
3430/*
3431  * Allocates an anonymous inode and file descriptor for the vcpu.
3432 */
3433static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3434{
3435 char name[8 + 1 + ITOA_MAX_LEN + 1];
3436
3437 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3438 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3439}
3440
3441static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3442{
3443#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3444 struct dentry *debugfs_dentry;
3445 char dir_name[ITOA_MAX_LEN * 2];
3446
3447 if (!debugfs_initialized())
3448 return;
3449
3450 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3451 debugfs_dentry = debugfs_create_dir(dir_name,
3452 vcpu->kvm->debugfs_dentry);
3453
3454 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3455#endif
3456}
3457
3458/*
3459 * Creates some virtual cpus. Good luck creating more than one.
3460 */
3461static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3462{
3463 int r;
3464 struct kvm_vcpu *vcpu;
3465 struct page *page;
3466
3467 if (id >= KVM_MAX_VCPU_ID)
3468 return -EINVAL;
3469
3470 mutex_lock(&kvm->lock);
3471 if (kvm->created_vcpus == KVM_MAX_VCPUS) {
3472 mutex_unlock(&kvm->lock);
3473 return -EINVAL;
3474 }
3475
3476 kvm->created_vcpus++;
3477 mutex_unlock(&kvm->lock);
3478
3479 r = kvm_arch_vcpu_precreate(kvm, id);
3480 if (r)
3481 goto vcpu_decrement;
3482
3483 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
3484 if (!vcpu) {
3485 r = -ENOMEM;
3486 goto vcpu_decrement;
3487 }
3488
3489 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3490 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3491 if (!page) {
3492 r = -ENOMEM;
3493 goto vcpu_free;
3494 }
3495 vcpu->run = page_address(page);
3496
3497 kvm_vcpu_init(vcpu, kvm, id);
3498
3499 r = kvm_arch_vcpu_create(vcpu);
3500 if (r)
3501 goto vcpu_free_run_page;
3502
3503 if (kvm->dirty_ring_size) {
3504 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3505 id, kvm->dirty_ring_size);
3506 if (r)
3507 goto arch_vcpu_destroy;
3508 }
3509
3510 mutex_lock(&kvm->lock);
3511 if (kvm_get_vcpu_by_id(kvm, id)) {
3512 r = -EEXIST;
3513 goto unlock_vcpu_destroy;
3514 }
3515
3516 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3517 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
3518
3519 /* Fill the stats id string for the vcpu */
3520 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
3521 task_pid_nr(current), id);
3522
3523 /* Now it's all set up, let userspace reach it */
3524 kvm_get_kvm(kvm);
3525 r = create_vcpu_fd(vcpu);
3526 if (r < 0) {
3527 kvm_put_kvm_no_destroy(kvm);
3528 goto unlock_vcpu_destroy;
3529 }
3530
3531 kvm->vcpus[vcpu->vcpu_idx] = vcpu;
3532
3533 /*
3534 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
3535 	 * before the incremented value of kvm->online_vcpus.
3536 */
3537 smp_wmb();
3538 atomic_inc(&kvm->online_vcpus);
3539
3540 mutex_unlock(&kvm->lock);
3541 kvm_arch_vcpu_postcreate(vcpu);
3542 kvm_create_vcpu_debugfs(vcpu);
3543 return r;
3544
3545unlock_vcpu_destroy:
3546 mutex_unlock(&kvm->lock);
3547 kvm_dirty_ring_free(&vcpu->dirty_ring);
3548arch_vcpu_destroy:
3549 kvm_arch_vcpu_destroy(vcpu);
3550vcpu_free_run_page:
3551 free_page((unsigned long)vcpu->run);
3552vcpu_free:
3553 kmem_cache_free(kvm_vcpu_cache, vcpu);
3554vcpu_decrement:
3555 mutex_lock(&kvm->lock);
3556 kvm->created_vcpus--;
3557 mutex_unlock(&kvm->lock);
3558 return r;
3559}
3560
3561static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3562{
3563 if (sigset) {
3564 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3565 vcpu->sigset_active = 1;
3566 vcpu->sigset = *sigset;
3567 } else
3568 vcpu->sigset_active = 0;
3569 return 0;
3570}
3571
3572static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
3573 size_t size, loff_t *offset)
3574{
3575 struct kvm_vcpu *vcpu = file->private_data;
3576
3577 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
3578 &kvm_vcpu_stats_desc[0], &vcpu->stat,
3579 sizeof(vcpu->stat), user_buffer, size, offset);
3580}
3581
3582static const struct file_operations kvm_vcpu_stats_fops = {
3583 .read = kvm_vcpu_stats_read,
3584 .llseek = noop_llseek,
3585};
3586
3587static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
3588{
3589 int fd;
3590 struct file *file;
3591 char name[15 + ITOA_MAX_LEN + 1];
3592
3593 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
3594
3595 fd = get_unused_fd_flags(O_CLOEXEC);
3596 if (fd < 0)
3597 return fd;
3598
3599 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
3600 if (IS_ERR(file)) {
3601 put_unused_fd(fd);
3602 return PTR_ERR(file);
3603 }
3604 file->f_mode |= FMODE_PREAD;
3605 fd_install(fd, file);
3606
3607 return fd;
3608}
3609
3610static long kvm_vcpu_ioctl(struct file *filp,
3611 unsigned int ioctl, unsigned long arg)
3612{
3613 struct kvm_vcpu *vcpu = filp->private_data;
3614 void __user *argp = (void __user *)arg;
3615 int r;
3616 struct kvm_fpu *fpu = NULL;
3617 struct kvm_sregs *kvm_sregs = NULL;
3618
3619 if (vcpu->kvm->mm != current->mm)
3620 return -EIO;
3621
3622 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3623 return -EINVAL;
3624
3625 /*
3626 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3627 * execution; mutex_lock() would break them.
3628 */
3629 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3630 if (r != -ENOIOCTLCMD)
3631 return r;
3632
3633 if (mutex_lock_killable(&vcpu->mutex))
3634 return -EINTR;
3635 switch (ioctl) {
3636 case KVM_RUN: {
3637 struct pid *oldpid;
3638 r = -EINVAL;
3639 if (arg)
3640 goto out;
3641 oldpid = rcu_access_pointer(vcpu->pid);
3642 if (unlikely(oldpid != task_pid(current))) {
3643 /* The thread running this VCPU changed. */
3644 struct pid *newpid;
3645
3646 r = kvm_arch_vcpu_run_pid_change(vcpu);
3647 if (r)
3648 break;
3649
3650 newpid = get_task_pid(current, PIDTYPE_PID);
3651 rcu_assign_pointer(vcpu->pid, newpid);
3652 if (oldpid)
3653 synchronize_rcu();
3654 put_pid(oldpid);
3655 }
3656 r = kvm_arch_vcpu_ioctl_run(vcpu);
3657 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
3658 break;
3659 }
3660 case KVM_GET_REGS: {
3661 struct kvm_regs *kvm_regs;
3662
3663 r = -ENOMEM;
3664 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3665 if (!kvm_regs)
3666 goto out;
3667 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3668 if (r)
3669 goto out_free1;
3670 r = -EFAULT;
3671 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3672 goto out_free1;
3673 r = 0;
3674out_free1:
3675 kfree(kvm_regs);
3676 break;
3677 }
3678 case KVM_SET_REGS: {
3679 struct kvm_regs *kvm_regs;
3680
3681 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3682 if (IS_ERR(kvm_regs)) {
3683 r = PTR_ERR(kvm_regs);
3684 goto out;
3685 }
3686 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3687 kfree(kvm_regs);
3688 break;
3689 }
3690 case KVM_GET_SREGS: {
3691 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3692 GFP_KERNEL_ACCOUNT);
3693 r = -ENOMEM;
3694 if (!kvm_sregs)
3695 goto out;
3696 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
3697 if (r)
3698 goto out;
3699 r = -EFAULT;
3700 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
3701 goto out;
3702 r = 0;
3703 break;
3704 }
3705 case KVM_SET_SREGS: {
3706 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3707 if (IS_ERR(kvm_sregs)) {
3708 r = PTR_ERR(kvm_sregs);
3709 kvm_sregs = NULL;
3710 goto out;
3711 }
3712 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
3713 break;
3714 }
3715 case KVM_GET_MP_STATE: {
3716 struct kvm_mp_state mp_state;
3717
3718 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
3719 if (r)
3720 goto out;
3721 r = -EFAULT;
3722 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
3723 goto out;
3724 r = 0;
3725 break;
3726 }
3727 case KVM_SET_MP_STATE: {
3728 struct kvm_mp_state mp_state;
3729
3730 r = -EFAULT;
3731 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
3732 goto out;
3733 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
3734 break;
3735 }
3736 case KVM_TRANSLATE: {
3737 struct kvm_translation tr;
3738
3739 r = -EFAULT;
3740 if (copy_from_user(&tr, argp, sizeof(tr)))
3741 goto out;
3742 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
3743 if (r)
3744 goto out;
3745 r = -EFAULT;
3746 if (copy_to_user(argp, &tr, sizeof(tr)))
3747 goto out;
3748 r = 0;
3749 break;
3750 }
3751 case KVM_SET_GUEST_DEBUG: {
3752 struct kvm_guest_debug dbg;
3753
3754 r = -EFAULT;
3755 if (copy_from_user(&dbg, argp, sizeof(dbg)))
3756 goto out;
3757 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
3758 break;
3759 }
3760 case KVM_SET_SIGNAL_MASK: {
3761 struct kvm_signal_mask __user *sigmask_arg = argp;
3762 struct kvm_signal_mask kvm_sigmask;
3763 sigset_t sigset, *p;
3764
3765 p = NULL;
3766 if (argp) {
3767 r = -EFAULT;
3768 if (copy_from_user(&kvm_sigmask, argp,
3769 sizeof(kvm_sigmask)))
3770 goto out;
3771 r = -EINVAL;
3772 if (kvm_sigmask.len != sizeof(sigset))
3773 goto out;
3774 r = -EFAULT;
3775 if (copy_from_user(&sigset, sigmask_arg->sigset,
3776 sizeof(sigset)))
3777 goto out;
3778 p = &sigset;
3779 }
3780 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
3781 break;
3782 }
3783 case KVM_GET_FPU: {
3784 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
3785 r = -ENOMEM;
3786 if (!fpu)
3787 goto out;
3788 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
3789 if (r)
3790 goto out;
3791 r = -EFAULT;
3792 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
3793 goto out;
3794 r = 0;
3795 break;
3796 }
3797 case KVM_SET_FPU: {
3798 fpu = memdup_user(argp, sizeof(*fpu));
3799 if (IS_ERR(fpu)) {
3800 r = PTR_ERR(fpu);
3801 fpu = NULL;
3802 goto out;
3803 }
3804 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
3805 break;
3806 }
3807 case KVM_GET_STATS_FD: {
3808 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
3809 break;
3810 }
3811 default:
3812 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
3813 }
3814out:
3815 mutex_unlock(&vcpu->mutex);
3816 kfree(fpu);
3817 kfree(kvm_sregs);
3818 return r;
3819}
3820
3821#ifdef CONFIG_KVM_COMPAT
3822static long kvm_vcpu_compat_ioctl(struct file *filp,
3823 unsigned int ioctl, unsigned long arg)
3824{
3825 struct kvm_vcpu *vcpu = filp->private_data;
3826 void __user *argp = compat_ptr(arg);
3827 int r;
3828
3829 if (vcpu->kvm->mm != current->mm)
3830 return -EIO;
3831
3832 switch (ioctl) {
3833 case KVM_SET_SIGNAL_MASK: {
3834 struct kvm_signal_mask __user *sigmask_arg = argp;
3835 struct kvm_signal_mask kvm_sigmask;
3836 sigset_t sigset;
3837
3838 if (argp) {
3839 r = -EFAULT;
3840 if (copy_from_user(&kvm_sigmask, argp,
3841 sizeof(kvm_sigmask)))
3842 goto out;
3843 r = -EINVAL;
3844 if (kvm_sigmask.len != sizeof(compat_sigset_t))
3845 goto out;
3846 r = -EFAULT;
3847 if (get_compat_sigset(&sigset,
3848 (compat_sigset_t __user *)sigmask_arg->sigset))
3849 goto out;
3850 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
3851 } else
3852 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
3853 break;
3854 }
3855 default:
3856 r = kvm_vcpu_ioctl(filp, ioctl, arg);
3857 }
3858
3859out:
3860 return r;
3861}
3862#endif
3863
3864static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
3865{
3866 struct kvm_device *dev = filp->private_data;
3867
3868 if (dev->ops->mmap)
3869 return dev->ops->mmap(dev, vma);
3870
3871 return -ENODEV;
3872}
3873
3874static int kvm_device_ioctl_attr(struct kvm_device *dev,
3875 int (*accessor)(struct kvm_device *dev,
3876 struct kvm_device_attr *attr),
3877 unsigned long arg)
3878{
3879 struct kvm_device_attr attr;
3880
3881 if (!accessor)
3882 return -EPERM;
3883
3884 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3885 return -EFAULT;
3886
3887 return accessor(dev, &attr);
3888}
3889
3890static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
3891 unsigned long arg)
3892{
3893 struct kvm_device *dev = filp->private_data;
3894
3895 if (dev->kvm->mm != current->mm)
3896 return -EIO;
3897
3898 switch (ioctl) {
3899 case KVM_SET_DEVICE_ATTR:
3900 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
3901 case KVM_GET_DEVICE_ATTR:
3902 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
3903 case KVM_HAS_DEVICE_ATTR:
3904 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
3905 default:
3906 if (dev->ops->ioctl)
3907 return dev->ops->ioctl(dev, ioctl, arg);
3908
3909 return -ENOTTY;
3910 }
3911}
3912
3913static int kvm_device_release(struct inode *inode, struct file *filp)
3914{
3915 struct kvm_device *dev = filp->private_data;
3916 struct kvm *kvm = dev->kvm;
3917
3918 if (dev->ops->release) {
3919 mutex_lock(&kvm->lock);
3920 list_del(&dev->vm_node);
3921 dev->ops->release(dev);
3922 mutex_unlock(&kvm->lock);
3923 }
3924
3925 kvm_put_kvm(kvm);
3926 return 0;
3927}
3928
3929static const struct file_operations kvm_device_fops = {
3930 .unlocked_ioctl = kvm_device_ioctl,
3931 .release = kvm_device_release,
3932 KVM_COMPAT(kvm_device_ioctl),
3933 .mmap = kvm_device_mmap,
3934};
3935
3936struct kvm_device *kvm_device_from_filp(struct file *filp)
3937{
3938 if (filp->f_op != &kvm_device_fops)
3939 return NULL;
3940
3941 return filp->private_data;
3942}
3943
3944static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
3945#ifdef CONFIG_KVM_MPIC
3946 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
3947 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
3948#endif
3949};
3950
3951int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
3952{
3953 if (type >= ARRAY_SIZE(kvm_device_ops_table))
3954 return -ENOSPC;
3955
3956 if (kvm_device_ops_table[type] != NULL)
3957 return -EEXIST;
3958
3959 kvm_device_ops_table[type] = ops;
3960 return 0;
3961}
3962
3963void kvm_unregister_device_ops(u32 type)
3964{
3965 if (kvm_device_ops_table[type] != NULL)
3966 kvm_device_ops_table[type] = NULL;
3967}
3968
3969static int kvm_ioctl_create_device(struct kvm *kvm,
3970 struct kvm_create_device *cd)
3971{
3972 const struct kvm_device_ops *ops = NULL;
3973 struct kvm_device *dev;
3974 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
3975 int type;
3976 int ret;
3977
3978 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
3979 return -ENODEV;
3980
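	/* Sanitize the array index under speculation (Spectre v1). */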
3981 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
3982 ops = kvm_device_ops_table[type];
3983 if (ops == NULL)
3984 return -ENODEV;
3985
3986 if (test)
3987 return 0;
3988
3989 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
3990 if (!dev)
3991 return -ENOMEM;
3992
3993 dev->ops = ops;
3994 dev->kvm = kvm;
3995
3996 mutex_lock(&kvm->lock);
3997 ret = ops->create(dev, type);
3998 if (ret < 0) {
3999 mutex_unlock(&kvm->lock);
4000 kfree(dev);
4001 return ret;
4002 }
4003 list_add(&dev->vm_node, &kvm->devices);
4004 mutex_unlock(&kvm->lock);
4005
4006 if (ops->init)
4007 ops->init(dev);
4008
4009 kvm_get_kvm(kvm);
4010 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4011 if (ret < 0) {
4012 kvm_put_kvm_no_destroy(kvm);
4013 mutex_lock(&kvm->lock);
4014 list_del(&dev->vm_node);
4015 mutex_unlock(&kvm->lock);
4016 ops->destroy(dev);
4017 return ret;
4018 }
4019
4020 cd->fd = ret;
4021 return 0;
4022}
4023
4024static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4025{
4026 switch (arg) {
4027 case KVM_CAP_USER_MEMORY:
4028 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4029 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4030 case KVM_CAP_INTERNAL_ERROR_DATA:
4031#ifdef CONFIG_HAVE_KVM_MSI
4032 case KVM_CAP_SIGNAL_MSI:
4033#endif
4034#ifdef CONFIG_HAVE_KVM_IRQFD
4035 case KVM_CAP_IRQFD:
4036 case KVM_CAP_IRQFD_RESAMPLE:
4037#endif
4038 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4039 case KVM_CAP_CHECK_EXTENSION_VM:
4040 case KVM_CAP_ENABLE_CAP_VM:
4041 case KVM_CAP_HALT_POLL:
4042 return 1;
4043#ifdef CONFIG_KVM_MMIO
4044 case KVM_CAP_COALESCED_MMIO:
4045 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4046 case KVM_CAP_COALESCED_PIO:
4047 return 1;
4048#endif
4049#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4050 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4051 return KVM_DIRTY_LOG_MANUAL_CAPS;
4052#endif
4053#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4054 case KVM_CAP_IRQ_ROUTING:
4055 return KVM_MAX_IRQ_ROUTES;
4056#endif
4057#if KVM_ADDRESS_SPACE_NUM > 1
4058 case KVM_CAP_MULTI_ADDRESS_SPACE:
4059 return KVM_ADDRESS_SPACE_NUM;
4060#endif
4061 case KVM_CAP_NR_MEMSLOTS:
4062 return KVM_USER_MEM_SLOTS;
4063 case KVM_CAP_DIRTY_LOG_RING:
4064#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
4065 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4066#else
4067 return 0;
4068#endif
4069 case KVM_CAP_BINARY_STATS_FD:
4070 return 1;
4071 default:
4072 break;
4073 }
4074 return kvm_vm_ioctl_check_extension(kvm, arg);
4075}
4076
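/*
 * Validate and set the per-VM dirty ring size in bytes: a power of two,
 * at least a page and big enough for the reserved entries, and at most
 * KVM_DIRTY_RING_MAX_ENTRIES entries.  E.g. assuming a 16-byte struct
 * kvm_dirty_gfn, a 64 KiB ring holds 4096 entries.
 */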
4077static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4078{
4079 int r;
4080
4081 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4082 return -EINVAL;
4083
4084 	/* The ring size must be a power of 2 */
4085 if (!size || (size & (size - 1)))
4086 return -EINVAL;
4087
4088 	/* Must cover the reserved entries and be at least one page */
4089 if (size < kvm_dirty_ring_get_rsvd_entries() *
4090 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4091 return -EINVAL;
4092
4093 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4094 sizeof(struct kvm_dirty_gfn))
4095 return -E2BIG;
4096
4097 	/* The ring size may only be set once */
4098 if (kvm->dirty_ring_size)
4099 return -EINVAL;
4100
4101 mutex_lock(&kvm->lock);
4102
4103 if (kvm->created_vcpus) {
4104 		/* The size cannot be changed after vCPUs have been created */
4105 r = -EINVAL;
4106 } else {
4107 kvm->dirty_ring_size = size;
4108 r = 0;
4109 }
4110
4111 mutex_unlock(&kvm->lock);
4112 return r;
4113}
4114
4115static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4116{
4117 int i;
4118 struct kvm_vcpu *vcpu;
4119 int cleared = 0;
4120
4121 if (!kvm->dirty_ring_size)
4122 return -EINVAL;
4123
4124 mutex_lock(&kvm->slots_lock);
4125
4126 kvm_for_each_vcpu(i, vcpu, kvm)
4127 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4128
4129 mutex_unlock(&kvm->slots_lock);
4130
4131 if (cleared)
4132 kvm_flush_remote_tlbs(kvm);
4133
4134 return cleared;
4135}
4136
4137int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4138 struct kvm_enable_cap *cap)
4139{
4140 return -EINVAL;
4141}
4142
4143static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4144 struct kvm_enable_cap *cap)
4145{
4146 switch (cap->cap) {
4147#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4148 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4149 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4150
4151 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4152 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4153
4154 if (cap->flags || (cap->args[0] & ~allowed_options))
4155 return -EINVAL;
4156 kvm->manual_dirty_log_protect = cap->args[0];
4157 return 0;
4158 }
4159#endif
4160 case KVM_CAP_HALT_POLL: {
4161 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4162 return -EINVAL;
4163
4164 kvm->max_halt_poll_ns = cap->args[0];
4165 return 0;
4166 }
4167 case KVM_CAP_DIRTY_LOG_RING:
4168 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
4169 default:
4170 return kvm_vm_ioctl_enable_cap(kvm, cap);
4171 }
4172}
4173
4174static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4175 size_t size, loff_t *offset)
4176{
4177 struct kvm *kvm = file->private_data;
4178
4179 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4180 &kvm_vm_stats_desc[0], &kvm->stat,
4181 sizeof(kvm->stat), user_buffer, size, offset);
4182}
4183
4184static const struct file_operations kvm_vm_stats_fops = {
4185 .read = kvm_vm_stats_read,
4186 .llseek = noop_llseek,
4187};
4188
4189static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
4190{
4191 int fd;
4192 struct file *file;
4193
4194 fd = get_unused_fd_flags(O_CLOEXEC);
4195 if (fd < 0)
4196 return fd;
4197
4198 file = anon_inode_getfile("kvm-vm-stats",
4199 &kvm_vm_stats_fops, kvm, O_RDONLY);
4200 if (IS_ERR(file)) {
4201 put_unused_fd(fd);
4202 return PTR_ERR(file);
4203 }
4204 file->f_mode |= FMODE_PREAD;
4205 fd_install(fd, file);
4206
4207 return fd;
4208}
4209
4210static long kvm_vm_ioctl(struct file *filp,
4211 unsigned int ioctl, unsigned long arg)
4212{
4213 struct kvm *kvm = filp->private_data;
4214 void __user *argp = (void __user *)arg;
4215 int r;
4216
4217 if (kvm->mm != current->mm)
4218 return -EIO;
4219 switch (ioctl) {
4220 case KVM_CREATE_VCPU:
4221 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
4222 break;
4223 case KVM_ENABLE_CAP: {
4224 struct kvm_enable_cap cap;
4225
4226 r = -EFAULT;
4227 if (copy_from_user(&cap, argp, sizeof(cap)))
4228 goto out;
4229 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
4230 break;
4231 }
4232 case KVM_SET_USER_MEMORY_REGION: {
4233 struct kvm_userspace_memory_region kvm_userspace_mem;
4234
4235 r = -EFAULT;
4236 if (copy_from_user(&kvm_userspace_mem, argp,
4237 sizeof(kvm_userspace_mem)))
4238 goto out;
4239
4240 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
4241 break;
4242 }
4243 case KVM_GET_DIRTY_LOG: {
4244 struct kvm_dirty_log log;
4245
4246 r = -EFAULT;
4247 if (copy_from_user(&log, argp, sizeof(log)))
4248 goto out;
4249 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4250 break;
4251 }
4252#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4253 case KVM_CLEAR_DIRTY_LOG: {
4254 struct kvm_clear_dirty_log log;
4255
4256 r = -EFAULT;
4257 if (copy_from_user(&log, argp, sizeof(log)))
4258 goto out;
4259 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4260 break;
4261 }
4262#endif
4263#ifdef CONFIG_KVM_MMIO
4264 case KVM_REGISTER_COALESCED_MMIO: {
4265 struct kvm_coalesced_mmio_zone zone;
4266
4267 r = -EFAULT;
4268 if (copy_from_user(&zone, argp, sizeof(zone)))
4269 goto out;
4270 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
4271 break;
4272 }
4273 case KVM_UNREGISTER_COALESCED_MMIO: {
4274 struct kvm_coalesced_mmio_zone zone;
4275
4276 r = -EFAULT;
4277 if (copy_from_user(&zone, argp, sizeof(zone)))
4278 goto out;
4279 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
4280 break;
4281 }
4282#endif
4283 case KVM_IRQFD: {
4284 struct kvm_irqfd data;
4285
4286 r = -EFAULT;
4287 if (copy_from_user(&data, argp, sizeof(data)))
4288 goto out;
4289 r = kvm_irqfd(kvm, &data);
4290 break;
4291 }
4292 case KVM_IOEVENTFD: {
4293 struct kvm_ioeventfd data;
4294
4295 r = -EFAULT;
4296 if (copy_from_user(&data, argp, sizeof(data)))
4297 goto out;
4298 r = kvm_ioeventfd(kvm, &data);
4299 break;
4300 }
4301#ifdef CONFIG_HAVE_KVM_MSI
4302 case KVM_SIGNAL_MSI: {
4303 struct kvm_msi msi;
4304
4305 r = -EFAULT;
4306 if (copy_from_user(&msi, argp, sizeof(msi)))
4307 goto out;
4308 r = kvm_send_userspace_msi(kvm, &msi);
4309 break;
4310 }
4311#endif
4312#ifdef __KVM_HAVE_IRQ_LINE
4313 case KVM_IRQ_LINE_STATUS:
4314 case KVM_IRQ_LINE: {
4315 struct kvm_irq_level irq_event;
4316
4317 r = -EFAULT;
4318 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
4319 goto out;
4320
4321 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4322 ioctl == KVM_IRQ_LINE_STATUS);
4323 if (r)
4324 goto out;
4325
4326 r = -EFAULT;
4327 if (ioctl == KVM_IRQ_LINE_STATUS) {
4328 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
4329 goto out;
4330 }
4331
4332 r = 0;
4333 break;
4334 }
4335#endif
4336#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4337 case KVM_SET_GSI_ROUTING: {
4338 struct kvm_irq_routing routing;
4339 struct kvm_irq_routing __user *urouting;
4340 struct kvm_irq_routing_entry *entries = NULL;
4341
4342 r = -EFAULT;
4343 if (copy_from_user(&routing, argp, sizeof(routing)))
4344 goto out;
4345 r = -EINVAL;
4346 if (!kvm_arch_can_set_irq_routing(kvm))
4347 goto out;
4348 if (routing.nr > KVM_MAX_IRQ_ROUTES)
4349 goto out;
4350 if (routing.flags)
4351 goto out;
4352 if (routing.nr) {
4353 urouting = argp;
4354 entries = vmemdup_user(urouting->entries,
4355 array_size(sizeof(*entries),
4356 routing.nr));
4357 if (IS_ERR(entries)) {
4358 r = PTR_ERR(entries);
4359 goto out;
4360 }
4361 }
4362 r = kvm_set_irq_routing(kvm, entries, routing.nr,
4363 routing.flags);
4364 kvfree(entries);
4365 break;
4366 }
4367#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
4368 case KVM_CREATE_DEVICE: {
4369 struct kvm_create_device cd;
4370
4371 r = -EFAULT;
4372 if (copy_from_user(&cd, argp, sizeof(cd)))
4373 goto out;
4374
4375 r = kvm_ioctl_create_device(kvm, &cd);
4376 if (r)
4377 goto out;
4378
4379 r = -EFAULT;
4380 if (copy_to_user(argp, &cd, sizeof(cd)))
4381 goto out;
4382
4383 r = 0;
4384 break;
4385 }
4386 case KVM_CHECK_EXTENSION:
4387 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4388 break;
4389 case KVM_RESET_DIRTY_RINGS:
4390 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4391 break;
4392 case KVM_GET_STATS_FD:
4393 r = kvm_vm_ioctl_get_stats_fd(kvm);
4394 break;
4395 default:
4396 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4397 }
4398out:
4399 return r;
4400}
4401
4402#ifdef CONFIG_KVM_COMPAT
4403struct compat_kvm_dirty_log {
4404 __u32 slot;
4405 __u32 padding1;
4406 union {
4407 compat_uptr_t dirty_bitmap; /* one bit per page */
4408 __u64 padding2;
4409 };
4410};
4411
4412struct compat_kvm_clear_dirty_log {
4413 __u32 slot;
4414 __u32 num_pages;
4415 __u64 first_page;
4416 union {
4417 compat_uptr_t dirty_bitmap; /* one bit per page */
4418 __u64 padding2;
4419 };
4420};
4421
4422static long kvm_vm_compat_ioctl(struct file *filp,
4423 unsigned int ioctl, unsigned long arg)
4424{
4425 struct kvm *kvm = filp->private_data;
4426 int r;
4427
4428 if (kvm->mm != current->mm)
4429 return -EIO;
4430 switch (ioctl) {
4431#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4432 case KVM_CLEAR_DIRTY_LOG: {
4433 struct compat_kvm_clear_dirty_log compat_log;
4434 struct kvm_clear_dirty_log log;
4435
4436 if (copy_from_user(&compat_log, (void __user *)arg,
4437 sizeof(compat_log)))
4438 return -EFAULT;
4439 log.slot = compat_log.slot;
4440 log.num_pages = compat_log.num_pages;
4441 log.first_page = compat_log.first_page;
4442 log.padding2 = compat_log.padding2;
4443 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4444
4445 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4446 break;
4447 }
4448#endif
4449 case KVM_GET_DIRTY_LOG: {
4450 struct compat_kvm_dirty_log compat_log;
4451 struct kvm_dirty_log log;
4452
4453 if (copy_from_user(&compat_log, (void __user *)arg,
4454 sizeof(compat_log)))
4455 return -EFAULT;
4456 log.slot = compat_log.slot;
4457 log.padding1 = compat_log.padding1;
4458 log.padding2 = compat_log.padding2;
4459 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4460
4461 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4462 break;
4463 }
4464 default:
4465 r = kvm_vm_ioctl(filp, ioctl, arg);
4466 }
4467 return r;
4468}
4469#endif
4470
4471static struct file_operations kvm_vm_fops = {
4472 .release = kvm_vm_release,
4473 .unlocked_ioctl = kvm_vm_ioctl,
4474 .llseek = noop_llseek,
4475 KVM_COMPAT(kvm_vm_compat_ioctl),
4476};
4477
4478bool file_is_kvm(struct file *file)
4479{
4480 return file && file->f_op == &kvm_vm_fops;
4481}
4482EXPORT_SYMBOL_GPL(file_is_kvm);
4483
4484static int kvm_dev_ioctl_create_vm(unsigned long type)
4485{
4486 int r;
4487 struct kvm *kvm;
4488 struct file *file;
4489
4490 kvm = kvm_create_vm(type);
4491 if (IS_ERR(kvm))
4492 return PTR_ERR(kvm);
4493#ifdef CONFIG_KVM_MMIO
4494 r = kvm_coalesced_mmio_init(kvm);
4495 if (r < 0)
4496 goto put_kvm;
4497#endif
4498 r = get_unused_fd_flags(O_CLOEXEC);
4499 if (r < 0)
4500 goto put_kvm;
4501
4502 snprintf(kvm->stats_id, sizeof(kvm->stats_id),
4503 "kvm-%d", task_pid_nr(current));
4504
4505 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4506 if (IS_ERR(file)) {
4507 put_unused_fd(r);
4508 r = PTR_ERR(file);
4509 goto put_kvm;
4510 }
4511
4512 /*
4513 * Don't call kvm_put_kvm anymore at this point; file->f_op is
4514 * already set, with ->release() being kvm_vm_release(). In error
4515 * cases it will be called by the final fput(file) and will take
4516 * care of doing kvm_put_kvm(kvm).
4517 */
4518 if (kvm_create_vm_debugfs(kvm, r) < 0) {
4519 put_unused_fd(r);
4520 fput(file);
4521 return -ENOMEM;
4522 }
4523 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4524
4525 fd_install(r, file);
4526 return r;
4527
4528put_kvm:
4529 kvm_put_kvm(kvm);
4530 return r;
4531}
4532
4533static long kvm_dev_ioctl(struct file *filp,
4534 unsigned int ioctl, unsigned long arg)
4535{
4536 long r = -EINVAL;
4537
4538 switch (ioctl) {
4539 case KVM_GET_API_VERSION:
4540 if (arg)
4541 goto out;
4542 r = KVM_API_VERSION;
4543 break;
4544 case KVM_CREATE_VM:
4545 r = kvm_dev_ioctl_create_vm(arg);
4546 break;
4547 case KVM_CHECK_EXTENSION:
4548 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4549 break;
4550 case KVM_GET_VCPU_MMAP_SIZE:
4551 if (arg)
4552 goto out;
4553 r = PAGE_SIZE; /* struct kvm_run */
4554#ifdef CONFIG_X86
4555 r += PAGE_SIZE; /* pio data page */
4556#endif
4557#ifdef CONFIG_KVM_MMIO
4558 r += PAGE_SIZE; /* coalesced mmio ring page */
4559#endif
4560 break;
4561 case KVM_TRACE_ENABLE:
4562 case KVM_TRACE_PAUSE:
4563 case KVM_TRACE_DISABLE:
4564 r = -EOPNOTSUPP;
4565 break;
4566 default:
4567 return kvm_arch_dev_ioctl(filp, ioctl, arg);
4568 }
4569out:
4570 return r;
4571}
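
/*
 * A minimal userspace sketch of the /dev/kvm ioctls handled above (not part
 * of this file; assumes <fcntl.h>, <sys/ioctl.h> and <linux/kvm.h>, with
 * error handling elided):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *
 *	int version = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
 *	int mmap_sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 * KVM_GET_API_VERSION returns KVM_API_VERSION (12), KVM_GET_VCPU_MMAP_SIZE
 * returns at least PAGE_SIZE, and KVM_CREATE_VM returns a new VM file
 * descriptor (the 0 argument selects the default machine type).  Passing a
 * non-zero arg to either of the first two fails with -EINVAL, matching the
 * "if (arg) goto out" checks above.
 */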
4572
4573static struct file_operations kvm_chardev_ops = {
4574 .unlocked_ioctl = kvm_dev_ioctl,
4575 .llseek = noop_llseek,
4576 KVM_COMPAT(kvm_dev_ioctl),
4577};
4578
4579static struct miscdevice kvm_dev = {
4580 KVM_MINOR,
4581 "kvm",
4582 &kvm_chardev_ops,
4583};
4584
4585static void hardware_enable_nolock(void *junk)
4586{
4587 int cpu = raw_smp_processor_id();
4588 int r;
4589
4590 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
4591 return;
4592
4593 cpumask_set_cpu(cpu, cpus_hardware_enabled);
4594
4595 r = kvm_arch_hardware_enable();
4596
4597 if (r) {
4598 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4599 atomic_inc(&hardware_enable_failed);
4600 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
4601 }
4602}
4603
4604static int kvm_starting_cpu(unsigned int cpu)
4605{
4606 raw_spin_lock(&kvm_count_lock);
4607 if (kvm_usage_count)
4608 hardware_enable_nolock(NULL);
4609 raw_spin_unlock(&kvm_count_lock);
4610 return 0;
4611}
4612
4613static void hardware_disable_nolock(void *junk)
4614{
4615 int cpu = raw_smp_processor_id();
4616
4617 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
4618 return;
4619 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4620 kvm_arch_hardware_disable();
4621}
4622
4623static int kvm_dying_cpu(unsigned int cpu)
4624{
4625 raw_spin_lock(&kvm_count_lock);
4626 if (kvm_usage_count)
4627 hardware_disable_nolock(NULL);
4628 raw_spin_unlock(&kvm_count_lock);
4629 return 0;
4630}
4631
4632static void hardware_disable_all_nolock(void)
4633{
4634 BUG_ON(!kvm_usage_count);
4635
4636 kvm_usage_count--;
4637 if (!kvm_usage_count)
4638 on_each_cpu(hardware_disable_nolock, NULL, 1);
4639}
4640
4641static void hardware_disable_all(void)
4642{
4643 raw_spin_lock(&kvm_count_lock);
4644 hardware_disable_all_nolock();
4645 raw_spin_unlock(&kvm_count_lock);
4646}
4647
4648static int hardware_enable_all(void)
4649{
4650 int r = 0;
4651
4652 raw_spin_lock(&kvm_count_lock);
4653
4654 kvm_usage_count++;
4655 if (kvm_usage_count == 1) {
4656 atomic_set(&hardware_enable_failed, 0);
4657 on_each_cpu(hardware_enable_nolock, NULL, 1);
4658
4659 if (atomic_read(&hardware_enable_failed)) {
4660 hardware_disable_all_nolock();
4661 r = -EBUSY;
4662 }
4663 }
4664
4665 raw_spin_unlock(&kvm_count_lock);
4666
4667 return r;
4668}
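
/*
 * Taken together, hardware_enable_all()/hardware_disable_all() reference-
 * count VM creation: virtualization is enabled on every online CPU when the
 * first VM is created and disabled again when the last VM goes away, while
 * kvm_starting_cpu()/kvm_dying_cpu() keep hotplugged CPUs in step with the
 * usage count.
 */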
4669
4670static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
4671 void *v)
4672{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if the CPU is
	 * still in VMX root mode.
	 *
	 * In addition, Intel TXT requires VMX to be disabled on all CPUs
	 * when the system shuts down.
	 */
4679 pr_info("kvm: exiting hardware virtualization\n");
4680 kvm_rebooting = true;
4681 on_each_cpu(hardware_disable_nolock, NULL, 1);
4682 return NOTIFY_OK;
4683}
4684
4685static struct notifier_block kvm_reboot_notifier = {
4686 .notifier_call = kvm_reboot,
4687 .priority = 0,
4688};
4689
4690static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
4691{
4692 int i;
4693
4694 for (i = 0; i < bus->dev_count; i++) {
4695 struct kvm_io_device *pos = bus->range[i].dev;
4696
4697 kvm_iodevice_destructor(pos);
4698 }
4699 kfree(bus);
4700}
4701
4702static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
4703 const struct kvm_io_range *r2)
4704{
4705 gpa_t addr1 = r1->addr;
4706 gpa_t addr2 = r2->addr;
4707
4708 if (addr1 < addr2)
4709 return -1;
4710
	/* If r2->len == 0, match the exact address.  If r2->len != 0,
	 * match any r1 range that lies entirely within r2's range.  Any
	 * order is acceptable for overlapping ranges, because
	 * kvm_io_bus_get_first_dev ensures we process all of them.
	 */
4716 if (r2->len) {
4717 addr1 += r1->len;
4718 addr2 += r2->len;
4719 }
4720
4721 if (addr1 > addr2)
4722 return 1;
4723
4724 return 0;
4725}
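
/*
 * A worked example of the rules above (addresses are illustrative): given a
 * registered range { .addr = 0x1000, .len = 8 }, a key of
 * { .addr = 0x1004, .len = 4 } compares equal because [0x1004, 0x1008) lies
 * inside [0x1000, 0x1008), while { .addr = 0x1004, .len = 8 } compares
 * greater because it runs past the end of the registered range.  A device
 * registered with len == 0 matches only keys at exactly its address.
 */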
4726
4727static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
4728{
4729 return kvm_io_bus_cmp(p1, p2);
4730}
4731
4732static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
4733 gpa_t addr, int len)
4734{
4735 struct kvm_io_range *range, key;
4736 int off;
4737
4738 key = (struct kvm_io_range) {
4739 .addr = addr,
4740 .len = len,
4741 };
4742
4743 range = bsearch(&key, bus->range, bus->dev_count,
4744 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
4745 if (range == NULL)
4746 return -ENOENT;
4747
4748 off = range - bus->range;
4749
4750 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
4751 off--;
4752
4753 return off;
4754}
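
/*
 * Note on the rewind loop above: when several registered ranges compare
 * equal to the key (for example, multiple ioeventfds at the same address),
 * bsearch() may land on any one of them, so the loop steps back to the
 * first match and callers then walk forward across the whole run of equal
 * entries.
 */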
4755
4756static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4757 struct kvm_io_range *range, const void *val)
4758{
4759 int idx;
4760
4761 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4762 if (idx < 0)
4763 return -EOPNOTSUPP;
4764
4765 while (idx < bus->dev_count &&
4766 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
4767 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
4768 range->len, val))
4769 return idx;
4770 idx++;
4771 }
4772
4773 return -EOPNOTSUPP;
4774}
4775
4776/* kvm_io_bus_write - called under kvm->slots_lock */
4777int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
4778 int len, const void *val)
4779{
4780 struct kvm_io_bus *bus;
4781 struct kvm_io_range range;
4782 int r;
4783
4784 range = (struct kvm_io_range) {
4785 .addr = addr,
4786 .len = len,
4787 };
4788
4789 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4790 if (!bus)
4791 return -ENOMEM;
4792 r = __kvm_io_bus_write(vcpu, bus, &range, val);
4793 return r < 0 ? r : 0;
4794}
4795EXPORT_SYMBOL_GPL(kvm_io_bus_write);
4796
4797/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
4798int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
4799 gpa_t addr, int len, const void *val, long cookie)
4800{
4801 struct kvm_io_bus *bus;
4802 struct kvm_io_range range;
4803
4804 range = (struct kvm_io_range) {
4805 .addr = addr,
4806 .len = len,
4807 };
4808
4809 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4810 if (!bus)
4811 return -ENOMEM;
4812
4813 /* First try the device referenced by cookie. */
4814 if ((cookie >= 0) && (cookie < bus->dev_count) &&
4815 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
4816 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
4817 val))
4818 return cookie;
4819
4820 /*
4821 * cookie contained garbage; fall back to search and return the
4822 * correct cookie value.
4823 */
4824 return __kvm_io_bus_write(vcpu, bus, &range, val);
4825}
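
/*
 * A minimal caller-side sketch of the cookie fast path above (hypothetical
 * caller; the cached variable is illustrative):
 *
 *	static long cached_cookie = -1;
 *
 *	int r = kvm_io_bus_write_cookie(vcpu, KVM_MMIO_BUS, addr, len,
 *					val, cached_cookie);
 *	if (r >= 0)
 *		cached_cookie = r;
 *
 * A negative or stale cookie is harmless: the function simply falls back
 * to the regular search and, on success, returns the fresh index to cache.
 */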
4826
4827static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4828 struct kvm_io_range *range, void *val)
4829{
4830 int idx;
4831
4832 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4833 if (idx < 0)
4834 return -EOPNOTSUPP;
4835
4836 while (idx < bus->dev_count &&
4837 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
4838 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
4839 range->len, val))
4840 return idx;
4841 idx++;
4842 }
4843
4844 return -EOPNOTSUPP;
4845}
4846
4847/* kvm_io_bus_read - called under kvm->slots_lock */
4848int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
4849 int len, void *val)
4850{
4851 struct kvm_io_bus *bus;
4852 struct kvm_io_range range;
4853 int r;
4854
4855 range = (struct kvm_io_range) {
4856 .addr = addr,
4857 .len = len,
4858 };
4859
4860 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4861 if (!bus)
4862 return -ENOMEM;
4863 r = __kvm_io_bus_read(vcpu, bus, &range, val);
4864 return r < 0 ? r : 0;
4865}
4866
4867/* Caller must hold slots_lock. */
4868int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
4869 int len, struct kvm_io_device *dev)
4870{
4871 int i;
4872 struct kvm_io_bus *new_bus, *bus;
4873 struct kvm_io_range range;
4874
4875 bus = kvm_get_bus(kvm, bus_idx);
4876 if (!bus)
4877 return -ENOMEM;
4878
	/* exclude ioeventfds, which are already bounded by the maximum fd count */
4880 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
4881 return -ENOSPC;
4882
4883 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
4884 GFP_KERNEL_ACCOUNT);
4885 if (!new_bus)
4886 return -ENOMEM;
4887
4888 range = (struct kvm_io_range) {
4889 .addr = addr,
4890 .len = len,
4891 .dev = dev,
4892 };
4893
4894 for (i = 0; i < bus->dev_count; i++)
4895 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
4896 break;
4897
4898 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
4899 new_bus->dev_count++;
4900 new_bus->range[i] = range;
4901 memcpy(new_bus->range + i + 1, bus->range + i,
4902 (bus->dev_count - i) * sizeof(struct kvm_io_range));
4903 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4904 synchronize_srcu_expedited(&kvm->srcu);
4905 kfree(bus);
4906
4907 return 0;
4908}
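
/*
 * A minimal registration sketch for the function above (hypothetical
 * device; the ops table is assumed to be defined elsewhere, and the caller
 * must hold kvm->slots_lock):
 *
 *	struct kvm_io_device my_dev;
 *	int r;
 *
 *	kvm_iodevice_init(&my_dev, &my_dev_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, 0xd0000000, 0x100,
 *				    &my_dev);
 *	mutex_unlock(&kvm->slots_lock);
 *
 * Readers are never blocked: the bus array is copied, the new entry is
 * spliced in at its sorted position, and the pointer is published with
 * rcu_assign_pointer(), so in-flight SRCU readers keep using the old array
 * until synchronize_srcu_expedited() returns and it can be freed.
 */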
4909
4910int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4911 struct kvm_io_device *dev)
4912{
4913 int i, j;
4914 struct kvm_io_bus *new_bus, *bus;
4915
4916 lockdep_assert_held(&kvm->slots_lock);
4917
4918 bus = kvm_get_bus(kvm, bus_idx);
4919 if (!bus)
4920 return 0;
4921
	for (i = 0; i < bus->dev_count; i++) {
		if (bus->range[i].dev == dev)
			break;
	}
4927
4928 if (i == bus->dev_count)
4929 return 0;
4930
4931 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
4932 GFP_KERNEL_ACCOUNT);
4933 if (new_bus) {
4934 memcpy(new_bus, bus, struct_size(bus, range, i));
4935 new_bus->dev_count--;
4936 memcpy(new_bus->range + i, bus->range + i + 1,
4937 flex_array_size(new_bus, range, new_bus->dev_count - i));
4938 }
4939
4940 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4941 synchronize_srcu_expedited(&kvm->srcu);
4942
4943 /* Destroy the old bus _after_ installing the (null) bus. */
4944 if (!new_bus) {
4945 pr_err("kvm: failed to shrink bus, removing it completely\n");
4946 for (j = 0; j < bus->dev_count; j++) {
4947 if (j == i)
4948 continue;
4949 kvm_iodevice_destructor(bus->range[j].dev);
4950 }
4951 }
4952
4953 kfree(bus);
4954 return new_bus ? 0 : -ENOMEM;
4955}
4956
4957struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4958 gpa_t addr)
4959{
4960 struct kvm_io_bus *bus;
4961 int dev_idx, srcu_idx;
4962 struct kvm_io_device *iodev = NULL;
4963
4964 srcu_idx = srcu_read_lock(&kvm->srcu);
4965
4966 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
4967 if (!bus)
4968 goto out_unlock;
4969
4970 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
4971 if (dev_idx < 0)
4972 goto out_unlock;
4973
4974 iodev = bus->range[dev_idx].dev;
4975
4976out_unlock:
4977 srcu_read_unlock(&kvm->srcu, srcu_idx);
4978
4979 return iodev;
4980}
4981EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
4982
4983static int kvm_debugfs_open(struct inode *inode, struct file *file,
4984 int (*get)(void *, u64 *), int (*set)(void *, u64),
4985 const char *fmt)
4986{
4987 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4988 inode->i_private;
4989
	/* The debugfs files hold a reference to the kvm struct, which
	 * is still valid when kvm_destroy_vm is called.
	 * To avoid a race between open and the removal of the debugfs
	 * directory, take a reference only while the users count is
	 * still non-zero.
	 */
4995 if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
4996 return -ENOENT;
4997
4998 if (simple_attr_open(inode, file, get,
4999 kvm_stats_debugfs_mode(stat_data->desc) & 0222
5000 ? set : NULL,
5001 fmt)) {
5002 kvm_put_kvm(stat_data->kvm);
5003 return -ENOMEM;
5004 }
5005
5006 return 0;
5007}
5008
5009static int kvm_debugfs_release(struct inode *inode, struct file *file)
5010{
5011 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5012 inode->i_private;
5013
5014 simple_attr_release(inode, file);
5015 kvm_put_kvm(stat_data->kvm);
5016
5017 return 0;
5018}
5019
5020static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
5021{
5022 *val = *(u64 *)((void *)(&kvm->stat) + offset);
5023
5024 return 0;
5025}
5026
5027static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
5028{
5029 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
5030
5031 return 0;
5032}
5033
5034static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
5035{
5036 int i;
5037 struct kvm_vcpu *vcpu;
5038
5039 *val = 0;
5040
5041 kvm_for_each_vcpu(i, vcpu, kvm)
5042 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
5043
5044 return 0;
5045}
5046
5047static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
5048{
5049 int i;
5050 struct kvm_vcpu *vcpu;
5051
5052 kvm_for_each_vcpu(i, vcpu, kvm)
5053 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
5054
5055 return 0;
5056}
5057
5058static int kvm_stat_data_get(void *data, u64 *val)
5059{
5060 int r = -EFAULT;
5061 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5062
5063 switch (stat_data->kind) {
5064 case KVM_STAT_VM:
5065 r = kvm_get_stat_per_vm(stat_data->kvm,
5066 stat_data->desc->desc.offset, val);
5067 break;
5068 case KVM_STAT_VCPU:
5069 r = kvm_get_stat_per_vcpu(stat_data->kvm,
5070 stat_data->desc->desc.offset, val);
5071 break;
5072 }
5073
5074 return r;
5075}
5076
5077static int kvm_stat_data_clear(void *data, u64 val)
5078{
5079 int r = -EFAULT;
5080 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5081
5082 if (val)
5083 return -EINVAL;
5084
5085 switch (stat_data->kind) {
5086 case KVM_STAT_VM:
5087 r = kvm_clear_stat_per_vm(stat_data->kvm,
5088 stat_data->desc->desc.offset);
5089 break;
5090 case KVM_STAT_VCPU:
5091 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
5092 stat_data->desc->desc.offset);
5093 break;
5094 }
5095
5096 return r;
5097}
5098
5099static int kvm_stat_data_open(struct inode *inode, struct file *file)
5100{
5101 __simple_attr_check_format("%llu\n", 0ull);
5102 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
5103 kvm_stat_data_clear, "%llu\n");
5104}
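
/*
 * All of the per-VM/per-vCPU stat files above funnel through a byte offset
 * into the corresponding stats structure.  A sketch of the idea (the field
 * name is illustrative):
 *
 *	size_t off = offsetof(struct kvm_vm_stat, remote_tlb_flush);
 *	u64 val;
 *
 *	kvm_get_stat_per_vm(kvm, off, &val);
 *
 * reads the counter at byte offset off inside kvm->stat.  Writing "0" to a
 * writable stat file clears the counter through the matching
 * kvm_clear_stat_per_* helper; any other value is rejected with -EINVAL.
 */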
5105
5106static const struct file_operations stat_fops_per_vm = {
5107 .owner = THIS_MODULE,
5108 .open = kvm_stat_data_open,
5109 .release = kvm_debugfs_release,
5110 .read = simple_attr_read,
5111 .write = simple_attr_write,
5112 .llseek = no_llseek,
5113};
5114
5115static int vm_stat_get(void *_offset, u64 *val)
5116{
5117 unsigned offset = (long)_offset;
5118 struct kvm *kvm;
5119 u64 tmp_val;
5120
5121 *val = 0;
5122 mutex_lock(&kvm_lock);
5123 list_for_each_entry(kvm, &vm_list, vm_list) {
5124 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
5125 *val += tmp_val;
5126 }
5127 mutex_unlock(&kvm_lock);
5128 return 0;
5129}
5130
5131static int vm_stat_clear(void *_offset, u64 val)
5132{
5133 unsigned offset = (long)_offset;
5134 struct kvm *kvm;
5135
5136 if (val)
5137 return -EINVAL;
5138
5139 mutex_lock(&kvm_lock);
5140 list_for_each_entry(kvm, &vm_list, vm_list) {
5141 kvm_clear_stat_per_vm(kvm, offset);
5142 }
5143 mutex_unlock(&kvm_lock);
5144
5145 return 0;
5146}
5147
5148DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
5149DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
5150
5151static int vcpu_stat_get(void *_offset, u64 *val)
5152{
5153 unsigned offset = (long)_offset;
5154 struct kvm *kvm;
5155 u64 tmp_val;
5156
5157 *val = 0;
5158 mutex_lock(&kvm_lock);
5159 list_for_each_entry(kvm, &vm_list, vm_list) {
5160 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
5161 *val += tmp_val;
5162 }
5163 mutex_unlock(&kvm_lock);
5164 return 0;
5165}
5166
5167static int vcpu_stat_clear(void *_offset, u64 val)
5168{
5169 unsigned offset = (long)_offset;
5170 struct kvm *kvm;
5171
5172 if (val)
5173 return -EINVAL;
5174
5175 mutex_lock(&kvm_lock);
5176 list_for_each_entry(kvm, &vm_list, vm_list) {
5177 kvm_clear_stat_per_vcpu(kvm, offset);
5178 }
5179 mutex_unlock(&kvm_lock);
5180
5181 return 0;
5182}
5183
5184DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
5185 "%llu\n");
5186DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
5187
5188static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
5189{
5190 struct kobj_uevent_env *env;
5191 unsigned long long created, active;
5192
5193 if (!kvm_dev.this_device || !kvm)
5194 return;
5195
5196 mutex_lock(&kvm_lock);
5197 if (type == KVM_EVENT_CREATE_VM) {
5198 kvm_createvm_count++;
5199 kvm_active_vms++;
5200 } else if (type == KVM_EVENT_DESTROY_VM) {
5201 kvm_active_vms--;
5202 }
5203 created = kvm_createvm_count;
5204 active = kvm_active_vms;
5205 mutex_unlock(&kvm_lock);
5206
5207 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
5208 if (!env)
5209 return;
5210
5211 add_uevent_var(env, "CREATED=%llu", created);
5212 add_uevent_var(env, "COUNT=%llu", active);
5213
5214 if (type == KVM_EVENT_CREATE_VM) {
5215 add_uevent_var(env, "EVENT=create");
5216 kvm->userspace_pid = task_pid_nr(current);
5217 } else if (type == KVM_EVENT_DESTROY_VM) {
5218 add_uevent_var(env, "EVENT=destroy");
5219 }
5220 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
5221
5222 if (kvm->debugfs_dentry) {
5223 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
5224
5225 if (p) {
5226 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
5227 if (!IS_ERR(tmp))
5228 add_uevent_var(env, "STATS_PATH=%s", tmp);
5229 kfree(p);
5230 }
5231 }
	/* no need for checks, since we add at most 5 keys */
5233 env->envp[env->envp_idx++] = NULL;
5234 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
5235 kfree(env);
5236}
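
/*
 * For reference, a VM creation event generated above reaches userspace as
 * a KOBJ_CHANGE uevent on the kvm misc device with an environment along
 * these lines (values are illustrative):
 *
 *	EVENT=create
 *	CREATED=42
 *	COUNT=3
 *	PID=1234
 *	STATS_PATH=/kvm/1234-11
 *
 * STATS_PATH is only present when the VM has a debugfs directory.
 */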
5237
5238static void kvm_init_debug(void)
5239{
5240 const struct file_operations *fops;
5241 const struct _kvm_stats_desc *pdesc;
5242 int i;
5243
5244 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
5245
5246 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
5247 pdesc = &kvm_vm_stats_desc[i];
5248 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5249 fops = &vm_stat_fops;
5250 else
5251 fops = &vm_stat_readonly_fops;
5252 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5253 kvm_debugfs_dir,
5254 (void *)(long)pdesc->desc.offset, fops);
5255 }
5256
5257 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
5258 pdesc = &kvm_vcpu_stats_desc[i];
5259 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5260 fops = &vcpu_stat_fops;
5261 else
5262 fops = &vcpu_stat_readonly_fops;
5263 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5264 kvm_debugfs_dir,
5265 (void *)(long)pdesc->desc.offset, fops);
5266 }
5267}
5268
5269static int kvm_suspend(void)
5270{
5271 if (kvm_usage_count)
5272 hardware_disable_nolock(NULL);
5273 return 0;
5274}
5275
5276static void kvm_resume(void)
5277{
5278 if (kvm_usage_count) {
5279#ifdef CONFIG_LOCKDEP
5280 WARN_ON(lockdep_is_held(&kvm_count_lock));
5281#endif
5282 hardware_enable_nolock(NULL);
5283 }
5284}
5285
5286static struct syscore_ops kvm_syscore_ops = {
5287 .suspend = kvm_suspend,
5288 .resume = kvm_resume,
5289};
5290
5291static inline
5292struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
5293{
5294 return container_of(pn, struct kvm_vcpu, preempt_notifier);
5295}
5296
5297static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
5298{
5299 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5300
5301 WRITE_ONCE(vcpu->preempted, false);
5302 WRITE_ONCE(vcpu->ready, false);
5303
5304 __this_cpu_write(kvm_running_vcpu, vcpu);
5305 kvm_arch_sched_in(vcpu, cpu);
5306 kvm_arch_vcpu_load(vcpu, cpu);
5307}
5308
5309static void kvm_sched_out(struct preempt_notifier *pn,
5310 struct task_struct *next)
5311{
5312 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5313
5314 if (current->on_rq) {
5315 WRITE_ONCE(vcpu->preempted, true);
5316 WRITE_ONCE(vcpu->ready, true);
5317 }
5318 kvm_arch_vcpu_put(vcpu);
5319 __this_cpu_write(kvm_running_vcpu, NULL);
5320}
5321
/**
 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
 *
 * It is safe to disable preemption only around the per-CPU access and to
 * use the resolved vcpu pointer after re-enabling preemption: even if the
 * current thread migrates to another CPU, the preempt notifier handlers
 * update the per-CPU variable on the destination CPU, so a later read
 * returns the same vcpu.
 */
5331struct kvm_vcpu *kvm_get_running_vcpu(void)
5332{
5333 struct kvm_vcpu *vcpu;
5334
5335 preempt_disable();
5336 vcpu = __this_cpu_read(kvm_running_vcpu);
5337 preempt_enable();
5338
5339 return vcpu;
5340}
5341EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
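
/*
 * A minimal usage sketch (hypothetical caller, e.g. an interrupt handler
 * that wants to know whether it interrupted guest context):
 *
 *	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 *
 *	if (vcpu)
 *		pr_debug("interrupted vCPU %d\n", vcpu->vcpu_id);
 */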
5342
5343/**
5344 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
5345 */
5346struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
5347{
5348 return &kvm_running_vcpu;
5349}
5350
5351struct kvm_cpu_compat_check {
5352 void *opaque;
5353 int *ret;
5354};
5355
5356static void check_processor_compat(void *data)
5357{
5358 struct kvm_cpu_compat_check *c = data;
5359
5360 *c->ret = kvm_arch_check_processor_compat(c->opaque);
5361}
5362
5363int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
5364 struct module *module)
5365{
5366 struct kvm_cpu_compat_check c;
5367 int r;
5368 int cpu;
5369
5370 r = kvm_arch_init(opaque);
5371 if (r)
5372 goto out_fail;
5373
	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like Intel and AMD on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already set up for another implementation.
	 */
5381 r = kvm_irqfd_init();
5382 if (r)
5383 goto out_irqfd;
5384
5385 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
5386 r = -ENOMEM;
5387 goto out_free_0;
5388 }
5389
5390 r = kvm_arch_hardware_setup(opaque);
5391 if (r < 0)
5392 goto out_free_1;
5393
5394 c.ret = &r;
5395 c.opaque = opaque;
5396 for_each_online_cpu(cpu) {
5397 smp_call_function_single(cpu, check_processor_compat, &c, 1);
5398 if (r < 0)
5399 goto out_free_2;
5400 }
5401
5402 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
5403 kvm_starting_cpu, kvm_dying_cpu);
5404 if (r)
5405 goto out_free_2;
5406 register_reboot_notifier(&kvm_reboot_notifier);
5407
5408 /* A kmem cache lets us meet the alignment requirements of fx_save. */
5409 if (!vcpu_align)
5410 vcpu_align = __alignof__(struct kvm_vcpu);
5411 kvm_vcpu_cache =
5412 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
5413 SLAB_ACCOUNT,
5414 offsetof(struct kvm_vcpu, arch),
5415 offsetofend(struct kvm_vcpu, stats_id)
5416 - offsetof(struct kvm_vcpu, arch),
5417 NULL);
5418 if (!kvm_vcpu_cache) {
5419 r = -ENOMEM;
5420 goto out_free_3;
5421 }
5422
5423 r = kvm_async_pf_init();
5424 if (r)
5425 goto out_free;
5426
5427 kvm_chardev_ops.owner = module;
5428 kvm_vm_fops.owner = module;
5429 kvm_vcpu_fops.owner = module;
5430
5431 r = misc_register(&kvm_dev);
5432 if (r) {
5433 pr_err("kvm: misc device register failed\n");
5434 goto out_unreg;
5435 }
5436
5437 register_syscore_ops(&kvm_syscore_ops);
5438
5439 kvm_preempt_ops.sched_in = kvm_sched_in;
5440 kvm_preempt_ops.sched_out = kvm_sched_out;
5441
5442 kvm_init_debug();
5443
5444 r = kvm_vfio_ops_init();
5445 WARN_ON(r);
5446
5447 return 0;
5448
5449out_unreg:
5450 kvm_async_pf_deinit();
5451out_free:
5452 kmem_cache_destroy(kvm_vcpu_cache);
5453out_free_3:
5454 unregister_reboot_notifier(&kvm_reboot_notifier);
5455 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5456out_free_2:
5457 kvm_arch_hardware_unsetup();
5458out_free_1:
5459 free_cpumask_var(cpus_hardware_enabled);
5460out_free_0:
5461 kvm_irqfd_exit();
5462out_irqfd:
5463 kvm_arch_exit();
5464out_fail:
5465 return r;
5466}
5467EXPORT_SYMBOL_GPL(kvm_init);
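
/*
 * A condensed sketch of how an architecture module is expected to use
 * kvm_init()/kvm_exit() (modelled loosely on the x86 vendor modules; the
 * my_arch_* names are placeholders):
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		return kvm_init(&my_arch_init_ops, sizeof(struct kvm_vcpu),
 *				__alignof__(struct kvm_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit my_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(my_arch_kvm_init);
 *	module_exit(my_arch_kvm_exit);
 *
 * The opaque pointer is passed through to kvm_arch_init(),
 * kvm_arch_hardware_setup() and kvm_arch_check_processor_compat(), so the
 * architecture decides what it carries (on x86 it is the vendor ops table).
 */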
5468
5469void kvm_exit(void)
5470{
5471 debugfs_remove_recursive(kvm_debugfs_dir);
5472 misc_deregister(&kvm_dev);
5473 kmem_cache_destroy(kvm_vcpu_cache);
5474 kvm_async_pf_deinit();
5475 unregister_syscore_ops(&kvm_syscore_ops);
5476 unregister_reboot_notifier(&kvm_reboot_notifier);
5477 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5478 on_each_cpu(hardware_disable_nolock, NULL, 1);
5479 kvm_arch_hardware_unsetup();
5480 kvm_arch_exit();
5481 kvm_irqfd_exit();
5482 free_cpumask_var(cpus_hardware_enabled);
5483 kvm_vfio_ops_exit();
5484}
5485EXPORT_SYMBOL_GPL(kvm_exit);
5486
5487struct kvm_vm_worker_thread_context {
5488 struct kvm *kvm;
5489 struct task_struct *parent;
5490 struct completion init_done;
5491 kvm_vm_thread_fn_t thread_fn;
5492 uintptr_t data;
5493 int err;
5494};
5495
5496static int kvm_vm_worker_thread(void *context)
5497{
	/*
	 * The init_context is allocated on the stack of the parent thread, so
	 * we have to copy anything that is needed beyond initialization into
	 * local storage.
	 */
5502 struct kvm_vm_worker_thread_context *init_context = context;
5503 struct kvm *kvm = init_context->kvm;
5504 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
5505 uintptr_t data = init_context->data;
5506 int err;
5507
5508 err = kthread_park(current);
5509 /* kthread_park(current) is never supposed to return an error */
5510 WARN_ON(err != 0);
5511 if (err)
5512 goto init_complete;
5513
5514 err = cgroup_attach_task_all(init_context->parent, current);
5515 if (err) {
5516 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
5517 __func__, err);
5518 goto init_complete;
5519 }
5520
5521 set_user_nice(current, task_nice(init_context->parent));
5522
5523init_complete:
5524 init_context->err = err;
5525 complete(&init_context->init_done);
5526 init_context = NULL;
5527
5528 if (err)
5529 return err;
5530
5531 /* Wait to be woken up by the spawner before proceeding. */
5532 kthread_parkme();
5533
5534 if (!kthread_should_stop())
5535 err = thread_fn(kvm, data);
5536
5537 return err;
5538}
5539
5540int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
5541 uintptr_t data, const char *name,
5542 struct task_struct **thread_ptr)
5543{
5544 struct kvm_vm_worker_thread_context init_context = {};
5545 struct task_struct *thread;
5546
5547 *thread_ptr = NULL;
5548 init_context.kvm = kvm;
5549 init_context.parent = current;
5550 init_context.thread_fn = thread_fn;
5551 init_context.data = data;
5552 init_completion(&init_context.init_done);
5553
5554 thread = kthread_run(kvm_vm_worker_thread, &init_context,
5555 "%s-%d", name, task_pid_nr(current));
5556 if (IS_ERR(thread))
5557 return PTR_ERR(thread);
5558
5559 /* kthread_run is never supposed to return NULL */
5560 WARN_ON(thread == NULL);
5561
5562 wait_for_completion(&init_context.init_done);
5563
5564 if (!init_context.err)
5565 *thread_ptr = thread;
5566
5567 return init_context.err;
5568}
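
/*
 * A minimal usage sketch for the helper above (hypothetical thread
 * function; an in-tree user of this pattern is the x86 NX huge-page
 * recovery worker):
 *
 *	static int my_worker_fn(struct kvm *kvm, uintptr_t data)
 *	{
 *		while (!kthread_should_stop()) {
 *			... periodic VM maintenance ...
 *		}
 *		return 0;
 *	}
 *
 *	r = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0, "my-worker",
 *					&thread);
 *	if (!r)
 *		kthread_unpark(thread);
 *
 * The worker parks itself after joining the parent's cgroups, so thread_fn
 * does not run until the spawner unparks the thread.
 */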