// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
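
/*
 * Illustrative sketch of the ordering rule above (hypothetical caller, not
 * an actual KVM path): a path needing several of these locks must acquire
 * them in the documented order and release them in reverse:
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	...
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 */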

DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64-bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

bool kvm_is_zone_device_page(struct page *page)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (WARN_ON_ONCE(!page_count(page)))
		return false;

	return is_zone_device_page(page);
}

/*
 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 * is likely incomplete; it has been compiled purely from people wanting to
 * back guests with a certain type of memory and encountering issues.
 */
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!PageReserved(page))
		return page;

	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
	if (is_zero_pfn(pfn))
		return page;

	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (kvm_is_zone_device_page(page))
		return page;

	return NULL;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
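
/*
 * Illustrative usage of the pair above (hypothetical ioctl handler, not a
 * specific KVM path): work that touches loaded vCPU state is bracketed by
 * vcpu_load()/vcpu_put():
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_get_regs(vcpu, regs);	// vCPU is loaded here
 *	vcpu_put(vcpu);
 */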

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{
	int cpu;

	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates.  Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized.  See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}
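
/*
 * Sketch of a caller (assumed, in the style of x86's Hyper-V TLB flush
 * support): request a TLB flush on a subset of vCPUs identified by a bitmap:
 *
 *	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 *
 *	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
 *	__set_bit(0, vcpu_bitmap);	// target only vCPU 0
 *	kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH, vcpu_bitmap);
 */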

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == except)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlbs(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
		return;

	/*
	 * Fall back to flushing the entire TLB if the architecture's
	 * range-based TLB invalidation is unsupported or can't be performed
	 * for whatever reason.
	 */
	kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock.  The interaction between the various operations on a
	 * memslot must be serialized by slots_lock to ensure the TLB flush
	 * from one operation is observed by any other operation on the same
	 * memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}

static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;

	if (unlikely(!mc->objects)) {
		if (WARN_ON_ONCE(!capacity))
			return -EIO;

		mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
		if (!mc->objects)
			return -ENOMEM;

		mc->capacity = capacity;
	}

	/* It is illegal to request a different capacity across topups. */
	if (WARN_ON_ONCE(mc->capacity != capacity))
		return -EIO;

	while (mc->nobjs < mc->capacity) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}

	kvfree(mc->objects);

	mc->objects = NULL;
	mc->capacity = 0;
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
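
/*
 * Sketch of the intended usage pattern for the cache helpers above (assumed
 * caller; arch page-fault paths are the real users): top up in a sleepable
 * context, then allocate in a context that must not fail or sleep, e.g.
 * under mmu_lock:
 *
 *	r = kvm_mmu_topup_memory_cache(&mc, min_objs);	// may sleep
 *	if (r)
 *		return r;
 *	write_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(&mc);	// cannot fail after topup
 *	write_unlock(&kvm->mmu_lock);
 */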

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = NULL;

	/* Fill the stats id string for the vcpu */
	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
		 task_pid_nr(current), id);
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_destroy(vcpu);
	kvm_dirty_ring_free(&vcpu->dirty_ring);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
		xa_erase(&kvm->vcpu_array, i);
	}

	atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm);

struct kvm_mmu_notifier_range {
	/*
	 * 64-bit addresses, as KVM notifiers can operate on host virtual
	 * addresses (unsigned long) and guest physical addresses (64-bit).
	 */
	u64 start;
	u64 end;
	union kvm_mmu_notifier_arg arg;
	gfn_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
};

/*
 * The inner-most helper returns a tuple containing the return value from the
 * arch- and action-specific handler, plus a flag indicating whether or not at
 * least one memslot was found, i.e. if the handler found guest memory.
 *
 * Note, most notifiers are averse to booleans, so even though KVM tracks the
 * return from arch code as a bool, outer helpers will cast it to an int. :-(
 */
typedef struct kvm_mmu_notifier_return {
	bool ret;
	bool found_memslot;
} kvm_mn_ret_t;

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))	     \

static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
							   const struct kvm_mmu_notifier_range *range)
{
	struct kvm_mmu_notifier_return r = {
		.ret = false,
		.found_memslot = false,
	};
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return r;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return r;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
			hva_end = min_t(unsigned long, range->end,
					slot->userspace_addr + (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.arg = range->arg;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!r.found_memslot) {
				r.found_memslot = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm);

				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			r.ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && r.ret)
		kvm_flush_remote_tlbs(kvm);

	if (r.found_memslot)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}
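
/*
 * Worked example of the hva -> gfn conversion above, with hypothetical
 * numbers: PAGE_SIZE == 4K, a slot with userspace_addr 0x10000 and
 * base_gfn 0x100.  An invalidation of [0x11000, 0x11800) clamps to
 * hva_start = 0x11000 and hva_end = 0x11800, and yields gfn_range
 * [0x101, 0x102), i.e. the single gfn 0x101 whose page intersects the
 * range.
 */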

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						union kvm_mmu_notifier_arg arg,
						gfn_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.arg		= arg,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range).ret;
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 gfn_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range).ret;
}

static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/*
	 * Skipping invalid memslots is correct if and only if change_pte() is
	 * surrounded by invalidate_range_{start,end}(), which is currently
	 * guaranteed by the primary MMU.  If that ever changes, KVM needs to
	 * unmap the memslot instead of skipping the memslot to ensure that KVM
	 * doesn't hold references to the old PFN.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));

	if (range->slot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return kvm_set_spte_gfn(kvm, range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const union kvm_mmu_notifier_arg arg = { .pte = pte };

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_invalidate_in_progress is zero, then no in-progress
	 * invalidations, including this one, found a relevant memslot at
	 * start(); rechecking memslots here is unnecessary.  Note, a false
	 * positive (count elevated by a different invalidation) is sub-optimal
	 * but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
		return;

	kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
}

void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
	lockdep_assert_held_write(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_invalidate_in_progress++;

	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
		kvm->mmu_invalidate_range_start = INVALID_GPA;
		kvm->mmu_invalidate_range_end = INVALID_GPA;
	}
}

void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);

	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}

bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
	return kvm_unmap_gfn_range(kvm, range);
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						   const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= kvm_mmu_unmap_gfn_range,
		.on_lock	= kvm_mmu_invalidate_begin,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
	 * each cache's lock.  There are relatively few caches in existence at
	 * any given time, and the caches themselves can check for hva overlap,
	 * i.e. don't need to rely on memslot overlap checks for performance.
	 * Because this runs without holding mmu_lock, the pfn caches must use
	 * mn_active_invalidate_count (see above) instead of
	 * mmu_invalidate_in_progress.
	 */
	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);

	/*
	 * If one or more memslots were found and thus zapped, notify arch code
	 * that guest memory has been reclaimed.  This needs to be done *after*
	 * dropping mmu_lock, as x86's reclaim path is slooooow.
	 */
	if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
		kvm_arch_guest_memory_reclaimed(kvm);

	return 0;
}

void kvm_mmu_invalidate_end(struct kvm *kvm)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * This sequence increase will notify the KVM page fault handler that
	 * the page that is going to be mapped in the spte could have been
	 * freed.
	 */
	kvm->mmu_invalidate_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
	 */
	kvm->mmu_invalidate_in_progress--;
	KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);

	/*
	 * Assert that at least one range was added between start() and end().
	 * Not adding a range isn't fatal, but it is a KVM bug.
	 */
	WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
}
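
/*
 * Sketch of how a consumer pairs with the sequence count above (assumed
 * shape; the real users are arch page-fault handlers, e.g. on x86):
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = ...faultin/translate the hva...;	// outside mmu_lock, may sleep
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		goto retry;	// raced with an invalidation, start over
 *	...install the mapping...
 *	write_unlock(&kvm->mmu_lock);
 */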

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_mmu_invalidate_end,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	__kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
		--kvm->mn_active_invalidate_count;
	wake = !kvm->mn_active_invalidate_count;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
				    kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else  /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	if (slot->flags & KVM_MEM_GUEST_MEMFD)
		kvm_gmem_unbind(slot);

	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct hlist_node *idnode;
	struct kvm_memory_slot *memslot;
	int bkt;

	/*
	 * The same memslot objects live in both active and inactive sets,
	 * arbitrarily free using index '1' so the second invocation of this
	 * function isn't operating over a structure with dangling pointers
	 * (even though this function isn't actually touching them).
	 */
	if (!slots->node_idx)
		return;

	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
		kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (IS_ERR(kvm->debugfs_dentry))
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
	static DEFINE_MUTEX(kvm_debugfs_lock);
	struct dentry *dent;
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i, ret = -ENOMEM;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
	mutex_lock(&kvm_debugfs_lock);
	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
	if (dent) {
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
		dput(dent);
		mutex_unlock(&kvm_debugfs_lock);
		return 0;
	}
	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	mutex_unlock(&kvm_debugfs_lock);
	if (IS_ERR(dent))
		return 0;

	kvm->debugfs_dentry = dent;
	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		goto out_err;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	kvm_arch_create_vm_debugfs(kvm);
	return 0;
out_err:
	kvm_destroy_vm_debugfs(kvm);
	return ret;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs directory is created.  kvm->debugfs_dentry
 * is set up by then, so arch-specific debugfs entries can be created under
 * it.  Cleanup is done automatically and recursively by
 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
 */
void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
}

static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r = -ENOMEM;
	int i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	xa_init(&kvm->mem_attr_array);
#endif

	INIT_LIST_HEAD(&kvm->gpc_list);
	spin_lock_init(&kvm->gpc_lock);

	INIT_LIST_HEAD(&kvm->devices);
	kvm->max_vcpus = KVM_MAX_VCPUS;

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	/*
	 * Force subsequent debugfs file creations to fail if the VM directory
	 * is not created (by kvm_create_vm_debugfs()).
	 */
	kvm->debugfs_dentry = ERR_PTR(-ENOENT);

	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
		 task_pid_nr(current));

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space. */
			slots->generation = i;
		}

		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0)
		goto out_no_coalesced_mmio;

	r = kvm_create_vm_debugfs(kvm, fdname);
	if (r)
		goto out_err_no_debugfs;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err:
	kvm_destroy_vm_debugfs(kvm);
out_err_no_debugfs:
	kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in kvm_swap_active_memslots() as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 *
	 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
	 * notifier between a start() and end(), then there shouldn't be any
	 * in-progress invalidations.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	if (kvm->mn_active_invalidate_count)
		kvm->mn_active_invalidate_count = 0;
	else
		WARN_ON(kvm->mmu_invalidate_in_progress);
#else
	kvm_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	xa_destroy(&kvm->mem_attr_array);
#endif
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the VM is not under destruction; this is a safe version of
 * kvm_get_kvm().  Returns true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
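
/*
 * Illustrative pattern for the "no destroy" put above (a sketch in the
 * style of kvm_vm_ioctl_create_vcpu()): take a reference on behalf of a
 * new fd, and drop it without destruction if installing the fd fails:
 *
 *	kvm_get_kvm(kvm);
 *	fd = create_vcpu_fd(vcpu);
 *	if (fd < 0)
 *		kvm_put_kvm_no_destroy(kvm);	// caller still uses @kvm
 */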

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
				  struct kvm_memory_slot *b)
{
	if (WARN_ON_ONCE(!a && !b))
		return 0;

	if (!a)
		return b->as_id;
	if (!b)
		return a->as_id;

	WARN_ON_ONCE(a->as_id != b->as_id);
	return a->as_id;
}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
				struct kvm_memory_slot *slot)
{
	struct rb_root *gfn_tree = &slots->gfn_tree;
	struct rb_node **node, *parent;
	int idx = slots->node_idx;

	parent = NULL;
	for (node = &gfn_tree->rb_node; *node; ) {
		struct kvm_memory_slot *tmp;

		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
		parent = *node;
		if (slot->base_gfn < tmp->base_gfn)
			node = &(*node)->rb_left;
		else if (slot->base_gfn > tmp->base_gfn)
			node = &(*node)->rb_right;
		else
			BUG();
	}

	rb_link_node(&slot->gfn_node[idx], parent, node);
	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
			       struct kvm_memory_slot *slot)
{
	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int idx = slots->node_idx;

	WARN_ON_ONCE(old->base_gfn != new->base_gfn);

	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
			&slots->gfn_tree);
}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
	int idx = slots->node_idx;

	if (old) {
		hash_del(&old->id_node[idx]);
		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);

		if ((long)old == atomic_long_read(&slots->last_used_slot))
			atomic_long_set(&slots->last_used_slot, (long)new);

		if (!new) {
			kvm_erase_gfn_node(slots, old);
			return;
		}
	}

	/*
	 * Initialize @new's hva range.  Do this even when replacing an @old
	 * slot, as kvm_copy_memslot() deliberately does not touch node data.
	 */
	new->hva_node[idx].start = new->userspace_addr;
	new->hva_node[idx].last = new->userspace_addr +
				  (new->npages << PAGE_SHIFT) - 1;

	/*
	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
	 * hva_node needs to be swapped with remove+insert even though hva can't
	 * change when replacing an existing slot.
	 */
	hash_add(slots->id_hash, &new->id_node[idx], new->id);
	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

	/*
	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
	 * switch the node in the gfn tree instead of removing the old and
	 * inserting the new as two separate operations.  Replacement is a
	 * single O(1) operation versus two O(log(n)) operations for
	 * remove+insert.
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

/*
 * Flags that do not access any of the extra space of struct
 * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
 * only allows these.
 */
#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)

static int check_memory_region_flags(struct kvm *kvm,
				     const struct kvm_userspace_memory_region2 *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

	if (kvm_arch_has_private_mem(kvm))
		valid_flags |= KVM_MEM_GUEST_MEMFD;

	/* Dirty logging private memory is not currently supported. */
	if (mem->flags & KVM_MEM_GUEST_MEMFD)
		valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

#ifdef CONFIG_HAVE_KVM_READONLY_MEM
	/*
	 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
	 * read-only memslots have emulated MMIO, not page fault, semantics,
	 * and KVM doesn't allow emulated MMIO for private memory.
	 */
	if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
		valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

	/* Grab the generation from the active memslots. */
	u64 gen = __kvm_memslots(kvm, as_id)->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot().  Must be released before the SRCU
	 * synchronization below in order to avoid deadlocking with another
	 * thread acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += kvm_arch_nr_memslot_as_ids(kvm);

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
}
1687
1688static int kvm_prepare_memory_region(struct kvm *kvm,
1689 const struct kvm_memory_slot *old,
1690 struct kvm_memory_slot *new,
1691 enum kvm_mr_change change)
1692{
1693 int r;
1694
1695 /*
1696 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1697 * will be freed on "commit". If logging is enabled in both old and
1698 * new, reuse the existing bitmap. If logging is enabled only in the
1699 * new and KVM isn't using a ring buffer, allocate and initialize a
1700 * new bitmap.
1701 */
1702 if (change != KVM_MR_DELETE) {
1703 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1704 new->dirty_bitmap = NULL;
1705 else if (old && old->dirty_bitmap)
1706 new->dirty_bitmap = old->dirty_bitmap;
1707 else if (kvm_use_dirty_bitmap(kvm)) {
1708 r = kvm_alloc_dirty_bitmap(new);
1709 if (r)
1710 return r;
1711
1712 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1713 bitmap_set(new->dirty_bitmap, 0, new->npages);
1714 }
1715 }
1716
1717 r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1718
1719 /* Free the bitmap on failure if it was allocated above. */
1720 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1721 kvm_destroy_dirty_bitmap(new);
1722
1723 return r;
1724}
1725
1726static void kvm_commit_memory_region(struct kvm *kvm,
1727 struct kvm_memory_slot *old,
1728 const struct kvm_memory_slot *new,
1729 enum kvm_mr_change change)
1730{
1731 int old_flags = old ? old->flags : 0;
1732 int new_flags = new ? new->flags : 0;
1733 /*
1734 * Update the total number of memslot pages before calling the arch
1735 * hook so that architectures can consume the result directly.
1736 */
1737 if (change == KVM_MR_DELETE)
1738 kvm->nr_memslot_pages -= old->npages;
1739 else if (change == KVM_MR_CREATE)
1740 kvm->nr_memslot_pages += new->npages;
1741
1742 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1743 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1744 atomic_set(&kvm->nr_memslots_dirty_logging,
1745 atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1746 }
1747
1748 kvm_arch_commit_memory_region(kvm, old, new, change);
1749
1750 switch (change) {
1751 case KVM_MR_CREATE:
1752 /* Nothing more to do. */
1753 break;
1754 case KVM_MR_DELETE:
1755 /* Free the old memslot and all its metadata. */
1756 kvm_free_memslot(kvm, old);
1757 break;
1758 case KVM_MR_MOVE:
1759 case KVM_MR_FLAGS_ONLY:
1760 /*
1761 * Free the dirty bitmap as needed; the below check encompasses
1762 * both the flags and whether a ring buffer is being used)
1763 */
1764 if (old->dirty_bitmap && !new->dirty_bitmap)
1765 kvm_destroy_dirty_bitmap(old);
1766
1767 /*
1768 * The final quirk. Free the detached, old slot, but only its
1769 * memory, not any metadata. Metadata, including arch specific
1770 * data, may be reused by @new.
1771 */
1772 kfree(old);
1773 break;
1774 default:
1775 BUG();
1776 }
1777}
1778
1779/*
1780 * Activate @new, which must be installed in the inactive slots by the caller,
1781 * by swapping the active slots and then propagating @new to @old once @old is
1782 * unreachable and can be safely modified.
1783 *
1784 * With NULL @old this simply adds @new to @active (while swapping the sets).
1785 * With NULL @new this simply removes @old from @active and frees it
1786 * (while also swapping the sets).
1787 */
1788static void kvm_activate_memslot(struct kvm *kvm,
1789 struct kvm_memory_slot *old,
1790 struct kvm_memory_slot *new)
1791{
1792 int as_id = kvm_memslots_get_as_id(old, new);
1793
1794 kvm_swap_active_memslots(kvm, as_id);
1795
1796 /* Propagate the new memslot to the now inactive memslots. */
1797 kvm_replace_memslot(kvm, old, new);
1798}
1799
1800static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1801 const struct kvm_memory_slot *src)
1802{
1803 dest->base_gfn = src->base_gfn;
1804 dest->npages = src->npages;
1805 dest->dirty_bitmap = src->dirty_bitmap;
1806 dest->arch = src->arch;
1807 dest->userspace_addr = src->userspace_addr;
1808 dest->flags = src->flags;
1809 dest->id = src->id;
1810 dest->as_id = src->as_id;
1811}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Mark the current slot INVALID. As with all memslot modifications,
	 * this must be done on an unreachable slot to avoid modifying the
	 * current slot in the active tree.
	 */
	kvm_copy_memslot(invalid_slot, old);
	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
	kvm_replace_memslot(kvm, old, invalid_slot);

	/*
	 * Activate the slot that is now marked INVALID, but don't propagate
	 * the slot to the now inactive slots. The slot is either going to be
	 * deleted or recreated as a new slot.
	 */
	kvm_swap_active_memslots(kvm, old->as_id);

	/*
	 * From this point no new shadow pages pointing to a deleted, or moved,
	 * memslot will be created. Validation of sp->gfn happens in:
	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 *	- kvm_is_visible_gfn (mmu_check_root)
	 */
	kvm_arch_flush_shadow_memslot(kvm, old);
	kvm_arch_guest_memory_reclaimed(kvm);

	/* Was released by kvm_swap_active_memslots(), reacquire. */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Copy the arch-specific field of the newly-installed slot back to the
	 * old slot as the arch data could have changed between releasing
	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the
	 * lock above. Writers are required to retrieve memslots *after*
	 * acquiring slots_arch_lock, thus the active slot's data is
	 * guaranteed to be fresh.
	 */
	old->arch = invalid_slot->arch;
}

static void kvm_create_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *new)
{
	/* Add the new memslot to the inactive set and activate. */
	kvm_replace_memslot(kvm, NULL, new);
	kvm_activate_memslot(kvm, NULL, new);
}

static void kvm_delete_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *old,
			       struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Remove the old memslot (in the inactive memslots) by passing NULL
	 * as the "new" slot, then do the same for the invalid version in the
	 * active slots.
	 */
	kvm_replace_memslot(kvm, old, NULL);
	kvm_activate_memslot(kvm, invalid_slot, NULL);
}

static void kvm_move_memslot(struct kvm *kvm,
			     struct kvm_memory_slot *old,
			     struct kvm_memory_slot *new,
			     struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Replace the old memslot in the inactive slots, and then swap slots
	 * and replace the current INVALID with the new as well.
	 */
	kvm_replace_memslot(kvm, old, new);
	kvm_activate_memslot(kvm, invalid_slot, new);
}

static void kvm_update_flags_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new)
{
	/*
	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
	 * an intermediate step. Instead, the old memslot is simply replaced
	 * with a new, updated copy in both memslot sets.
	 */
	kvm_replace_memslot(kvm, old, new);
	kvm_activate_memslot(kvm, old, new);
}

static int kvm_set_memslot(struct kvm *kvm,
			   struct kvm_memory_slot *old,
			   struct kvm_memory_slot *new,
			   enum kvm_mr_change change)
{
	struct kvm_memory_slot *invalid_slot;
	int r;

	/*
	 * Released in kvm_swap_active_memslots().
	 *
	 * Must be held from before the current memslots are copied until after
	 * the new memslots are installed with rcu_assign_pointer, then
	 * released before the synchronize srcu in kvm_swap_active_memslots().
	 *
	 * When modifying memslots outside of the slots_lock, must be held
	 * before reading the pointer to the current memslots until after all
	 * changes to those memslots are complete.
	 *
	 * These rules ensure that installing new memslots does not lose
	 * changes made to the previous memslots.
	 */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Invalidate the old slot if it's being deleted or moved. This is
	 * done prior to actually deleting/moving the memslot to allow vCPUs to
	 * continue running by ensuring there are no mappings or shadow pages
	 * for the memslot when it is deleted/moved. Without pre-invalidation
	 * (and without a lock), a window would exist between effecting the
	 * delete/move and committing the changes in arch code where KVM or a
	 * guest could access a non-existent memslot.
	 *
	 * Modifications are done on a temporary, unreachable slot. The old
	 * slot needs to be preserved in case a later step fails and the
	 * invalidation needs to be reverted.
	 */
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
		if (!invalid_slot) {
			mutex_unlock(&kvm->slots_arch_lock);
			return -ENOMEM;
		}
		kvm_invalidate_memslot(kvm, old, invalid_slot);
	}

	r = kvm_prepare_memory_region(kvm, old, new, change);
	if (r) {
		/*
		 * For DELETE/MOVE, revert the above INVALID change. No
		 * modifications required since the original slot was preserved
		 * in the inactive slots. Changing the active memslots also
		 * releases slots_arch_lock.
		 */
		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
			kvm_activate_memslot(kvm, invalid_slot, old);
			kfree(invalid_slot);
		} else {
			mutex_unlock(&kvm->slots_arch_lock);
		}
		return r;
	}

	/*
	 * For DELETE and MOVE, the working slot is now active as the INVALID
	 * version of the old slot. MOVE is particularly special as it reuses
	 * the old slot and returns a copy of the old slot (in invalid_slot).
	 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
	 * old slot is detached but otherwise preserved.
	 */
	if (change == KVM_MR_CREATE)
		kvm_create_memslot(kvm, new);
	else if (change == KVM_MR_DELETE)
		kvm_delete_memslot(kvm, old, invalid_slot);
	else if (change == KVM_MR_MOVE)
		kvm_move_memslot(kvm, old, new, invalid_slot);
	else if (change == KVM_MR_FLAGS_ONLY)
		kvm_update_flags_memslot(kvm, old, new);
	else
		BUG();

	/* Free the temporary INVALID slot used for DELETE and MOVE. */
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
		kfree(invalid_slot);

	/*
	 * No need to refresh new->arch, changes after dropping slots_arch_lock
	 * will directly hit the final, active memslot. Architectures are
	 * responsible for knowing that new->arch may be stale.
	 */
	kvm_commit_memory_region(kvm, old, new, change);

	return 0;
}

static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
				      gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
		if (iter.slot->id != id)
			return true;
	}

	return false;
}
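
/*
 * Note the check above skips the slot's own id, so moving or recreating a
 * slot over its current gfn range does not count as an overlap. An
 * illustrative sketch (hypothetical values, not kernel code):
 *
 *	slot 0 covers [0x000, 0x100), slot 1 covers [0x100, 0x200)
 *	kvm_check_memslot_overlap(slots, 0, 0x080, 0x180) == true,
 *	    the range intersects slot 1;
 *	kvm_check_memslot_overlap(slots, 0, 0x000, 0x100) == false,
 *	    the only intersecting slot is slot 0 itself.
 */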

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region2 *mem)
{
	struct kvm_memory_slot *old, *new;
	struct kvm_memslots *slots;
	enum kvm_mr_change change;
	unsigned long npages;
	gfn_t base_gfn;
	int as_id, id;
	int r;

	r = check_memory_region_flags(kvm, mem);
	if (r)
		return r;

	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
	    (mem->memory_size != (unsigned long)mem->memory_size))
		return -EINVAL;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		return -EINVAL;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
	    !access_ok((void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size))
		return -EINVAL;
	if (mem->flags & KVM_MEM_GUEST_MEMFD &&
	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
		return -EINVAL;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
		return -EINVAL;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		return -EINVAL;
	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);

	/*
	 * Note, the old memslot (and the pointer itself!) may be invalidated
	 * and/or destroyed by kvm_set_memslot().
	 */
	old = id_to_memslot(slots, id);

	if (!mem->memory_size) {
		if (!old || !old->npages)
			return -EINVAL;

		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
			return -EIO;

		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
	}

	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
	npages = (mem->memory_size >> PAGE_SHIFT);

	if (!old || !old->npages) {
		change = KVM_MR_CREATE;

		/*
		 * To simplify KVM internals, the total number of pages across
		 * all memslots must fit in an unsigned long.
		 */
		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
			return -EINVAL;
	} else { /* Modify an existing slot. */
		/* Private memslots are immutable, they can only be deleted. */
		if (mem->flags & KVM_MEM_GUEST_MEMFD)
			return -EINVAL;
		if ((mem->userspace_addr != old->userspace_addr) ||
		    (npages != old->npages) ||
		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
			return -EINVAL;

		if (base_gfn != old->base_gfn)
			change = KVM_MR_MOVE;
		else if (mem->flags != old->flags)
			change = KVM_MR_FLAGS_ONLY;
		else /* Nothing to change. */
			return 0;
	}

	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
		return -EEXIST;

	/* Allocate a slot that will persist in the memslot. */
	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
	if (!new)
		return -ENOMEM;

	new->as_id = as_id;
	new->id = id;
	new->base_gfn = base_gfn;
	new->npages = npages;
	new->flags = mem->flags;
	new->userspace_addr = mem->userspace_addr;
	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
		r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
		if (r)
			goto out;
	}

	r = kvm_set_memslot(kvm, old, new, change);
	if (r)
		goto out_unbind;

	return 0;

out_unbind:
	if (mem->flags & KVM_MEM_GUEST_MEMFD)
		kvm_gmem_unbind(new);
out:
	kfree(new);
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region2 *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region2 *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}
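
/*
 * Illustrative userspace sketch (not kernel code): the slot field packs the
 * address space id in the upper 16 bits and the slot id in the lower 16,
 * matching the "as_id = mem->slot >> 16; id = (u16)mem->slot" decode above.
 * Sizes and addresses must be page aligned. The values below are
 * hypothetical.
 *
 *	struct kvm_userspace_memory_region2 region = {
 *		.slot = (as_id << 16) | slot_id,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 1ull << 30,
 *		.memory_size = 2ull << 20,
 *		.userspace_addr = (__u64)backing_hva,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
 */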

#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log - get a snapshot of dirty pages
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	set to '1' if any dirty pages were found
 * @memslot:	set to the associated memslot, always valid on success
 */
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot)
{
	struct kvm_memslots *slots;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	/* Dirty ring tracking may be exclusive to dirty log tracking */
	if (!kvm_use_dirty_bitmap(kvm))
		return -ENXIO;

	*memslot = NULL;
	*is_dirty = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
	if (!(*memslot) || !(*memslot)->dirty_bitmap)
		return -ENOENT;

	kvm_arch_sync_dirty_log(kvm, *memslot);

	n = kvm_dirty_bitmap_bytes(*memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * Keep in mind that vCPU threads can write to the bitmap concurrently. So, to
 * avoid losing track of dirty pages we keep the following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry. This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 */
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	/* Dirty ring tracking may be exclusive to dirty log tracking */
	if (!kvm_use_dirty_bitmap(kvm))
		return -ENXIO;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	kvm_arch_sync_dirty_log(kvm, memslot);

	n = kvm_dirty_bitmap_bytes(memslot);
	flush = false;
	if (kvm->manual_dirty_log_protect) {
		/*
		 * Unlike kvm_get_dirty_log, we always return false in *flush,
		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
		 * is some code duplication between this function and
		 * kvm_get_dirty_log, but hopefully all architectures will
		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
		 * can be eliminated.
		 */
		dirty_bitmap_buffer = dirty_bitmap;
	} else {
		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
		memset(dirty_bitmap_buffer, 0, n);

		KVM_MMU_LOCK(kvm);
		for (i = 0; i < n / sizeof(long); i++) {
			unsigned long mask;
			gfn_t offset;

			if (!dirty_bitmap[i])
				continue;

			flush = true;
			mask = xchg(&dirty_bitmap[i], 0);
			dirty_bitmap_buffer[i] = mask;

			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
		KVM_MMU_UNLOCK(kvm);
	}

	if (flush)
		kvm_flush_remote_tlbs_memslot(kvm, memslot);

	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm:	kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if a previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude subsequent dirty log reads by user space. Flushing the
 * TLB ensures writes will be marked dirty for the next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}

/**
 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address from which to fetch the bitmap of dirty pages
 */
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
				       struct kvm_clear_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int as_id, id;
	gfn_t offset;
	unsigned long i, n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	/* Dirty ring tracking may be exclusive to dirty log tracking */
	if (!kvm_use_dirty_bitmap(kvm))
		return -ENXIO;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	if (log->first_page & 63)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;

	if (log->first_page > memslot->npages ||
	    log->num_pages > memslot->npages - log->first_page ||
	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
		return -EINVAL;

	kvm_arch_sync_dirty_log(kvm, memslot);

	flush = false;
	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
		return -EFAULT;

	KVM_MMU_LOCK(kvm);
	for (offset = log->first_page, i = offset / BITS_PER_LONG,
		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
	     i++, offset += BITS_PER_LONG) {
		unsigned long mask = *dirty_bitmap_buffer++;
		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
		if (!mask)
			continue;

		mask &= atomic_long_fetch_andnot(mask, p);

		/*
		 * mask contains the bits that really have been cleared. This
		 * never includes any bits beyond the length of the memslot (if
		 * the length is not aligned to 64 pages), therefore it is not
		 * a problem if userspace sets them in log->dirty_bitmap.
		 */
		if (mask) {
			flush = true;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}
	KVM_MMU_UNLOCK(kvm);

	if (flush)
		kvm_flush_remote_tlbs_memslot(kvm, memslot);

	return 0;
}
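
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, clearing pages
 * [128, 320) of a slot passes first_page = 128 (64-aligned, as required)
 * and num_pages = 192. The copy size is ALIGN(192, 64) / 8 = 24 bytes, and
 * the loop above walks dirty_bitmap[2..4], atomically clearing (via
 * fetch_andnot) only the bits the caller provided in log->dirty_bitmap.
 */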

static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
					struct kvm_clear_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_clear_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
/*
 * Returns true if _all_ gfns in the range [@start, @end) have attributes
 * matching @attrs.
 */
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long attrs)
{
	XA_STATE(xas, &kvm->mem_attr_array, start);
	unsigned long index;
	bool has_attrs;
	void *entry;

	rcu_read_lock();

	if (!attrs) {
		has_attrs = !xas_find(&xas, end - 1);
		goto out;
	}

	has_attrs = true;
	for (index = start; index < end; index++) {
		do {
			entry = xas_next(&xas);
		} while (xas_retry(&xas, entry));

		if (xas.xa_index != index || xa_to_value(entry) != attrs) {
			has_attrs = false;
			break;
		}
	}

out:
	rcu_read_unlock();
	return has_attrs;
}

static u64 kvm_supported_mem_attributes(struct kvm *kvm)
{
	if (!kvm || kvm_arch_has_private_mem(kvm))
		return KVM_MEMORY_ATTRIBUTE_PRIVATE;

	return 0;
}

static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_mmu_notifier_range *range)
{
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	struct kvm_memslot_iter iter;
	bool found_memslot = false;
	bool ret = false;
	int i;

	gfn_range.arg = range->arg;
	gfn_range.may_block = range->may_block;

	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
			slot = iter.slot;
			gfn_range.slot = slot;

			gfn_range.start = max(range->start, slot->base_gfn);
			gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
			if (gfn_range.start >= gfn_range.end)
				continue;

			if (!found_memslot) {
				found_memslot = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm);
			}

			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && ret)
		kvm_flush_remote_tlbs(kvm);

	if (found_memslot)
		KVM_MMU_UNLOCK(kvm);
}

static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
					  struct kvm_gfn_range *range)
{
	/*
	 * Unconditionally add the range to the invalidation set, regardless of
	 * whether or not the arch callback actually needs to zap SPTEs. E.g.
	 * if KVM supports RWX attributes in the future and the attributes are
	 * going from R=>RW, zapping isn't strictly necessary. Unconditionally
	 * adding the range allows KVM to require that MMU invalidations add at
	 * least one range between begin() and end(), e.g. allows KVM to detect
	 * bugs where the add() is missed. Relaxing the rule *might* be safe,
	 * but it's not obvious that allowing new mappings while the attributes
	 * are in flux is desirable or worth the complexity.
	 */
	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);

	return kvm_arch_pre_set_memory_attributes(kvm, range);
}

/* Set @attributes for the gfn range [@start, @end). */
static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long attributes)
{
	struct kvm_mmu_notifier_range pre_set_range = {
		.start = start,
		.end = end,
		.handler = kvm_pre_set_memory_attributes,
		.on_lock = kvm_mmu_invalidate_begin,
		.flush_on_ret = true,
		.may_block = true,
	};
	struct kvm_mmu_notifier_range post_set_range = {
		.start = start,
		.end = end,
		.arg.attributes = attributes,
		.handler = kvm_arch_post_set_memory_attributes,
		.on_lock = kvm_mmu_invalidate_end,
		.may_block = true,
	};
	unsigned long i;
	void *entry;
	int r = 0;

	entry = attributes ? xa_mk_value(attributes) : NULL;

	mutex_lock(&kvm->slots_lock);

	/* Nothing to do if the entire range already has the desired attributes. */
	if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
		goto out_unlock;

	/*
	 * Reserve memory ahead of time to avoid having to deal with failures
	 * partway through setting the new attributes.
	 */
	for (i = start; i < end; i++) {
		r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
		if (r)
			goto out_unlock;
	}

	kvm_handle_gfn_range(kvm, &pre_set_range);

	for (i = start; i < end; i++) {
		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
				    GFP_KERNEL_ACCOUNT));
		KVM_BUG_ON(r, kvm);
	}

	kvm_handle_gfn_range(kvm, &post_set_range);

out_unlock:
	mutex_unlock(&kvm->slots_lock);

	return r;
}
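
/*
 * The reserve-then-store pattern above is what keeps a failed update from
 * being half applied: xa_reserve() can fail with -ENOMEM before any
 * attribute is touched, while xa_store() of an already-reserved entry is
 * not expected to fail (KVM_BUG_ON() guards it). A minimal sketch of the
 * same idiom (hypothetical xarray, not kernel code):
 *
 *	for (i = start; i < end; i++)
 *		if (xa_reserve(&xa, i, GFP_KERNEL))
 *			return -ENOMEM;		// nothing stored yet
 *	for (i = start; i < end; i++)
 *		xa_store(&xa, i, entry, GFP_KERNEL);	// reserved, succeeds
 */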

static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
					   struct kvm_memory_attributes *attrs)
{
	gfn_t start, end;

	/* flags is currently not used. */
	if (attrs->flags)
		return -EINVAL;
	if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
		return -EINVAL;
	if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
		return -EINVAL;
	if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
		return -EINVAL;

	start = attrs->address >> PAGE_SHIFT;
	end = (attrs->address + attrs->size) >> PAGE_SHIFT;

	/*
	 * xarray tracks data using "unsigned long", and as a result so does
	 * KVM. For simplicity, generic attributes are supported only on
	 * 64-bit architectures.
	 */
	BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));

	return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
}
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
	u64 gen = slots->generation;
	struct kvm_memory_slot *slot;

	/*
	 * This also protects against using a memslot from a different address space,
	 * since different address spaces have different generation numbers.
	 */
	if (unlikely(gen != vcpu->last_used_slot_gen)) {
		vcpu->last_used_slot = NULL;
		vcpu->last_used_slot_gen = gen;
	}

	slot = try_get_memslot(vcpu->last_used_slot, gfn);
	if (slot)
		return slot;

	/*
	 * Fall back to searching all memslots. We purposely use
	 * search_memslots() instead of __gfn_to_memslot() to avoid
	 * thrashing the VM-wide last_used_slot in kvm_memslots.
	 */
	slot = search_memslots(slots, gfn, false);
	if (slot) {
		vcpu->last_used_slot = slot;
		return slot;
	}

	return NULL;
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return kvm_is_visible_memslot(memslot);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	mmap_read_unlock(current->mm);

	return size;
}

static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
				 gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

/*
 * Return the hva of a @gfn and the R/W attribute if possible.
 *
 * @slot: the kvm_memory_slot which contains @gfn
 * @gfn: the gfn to be translated
 * @writable: used to return the read/write attribute of the @slot if the hva
 * is valid and @writable is not NULL
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

	rc = get_user_pages(addr, 1, flags, NULL);
	return rc == -EHWPOISON;
}

/*
 * The fast path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned. It's also the
 * only path that can run in atomic context.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
			    bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows mapping a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
{
	/*
	 * When a VCPU accesses a page that is not mapped into the secondary
	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
	 * make progress. We always want to honor NUMA hinting faults in that
	 * case, because GUP usage corresponds to memory accesses from the VCPU.
	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
	 * mapped into the secondary MMU and gets accessed by a VCPU.
	 *
	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
	 * implicitly honor NUMA hinting faults and don't need this flag.
	 */
	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
	struct page *page;
	int npages;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (write_fault)
		flags |= FOLL_WRITE;
	if (async)
		flags |= FOLL_NOWAIT;
	if (interruptible)
		flags |= FOLL_INTERRUPTIBLE;

	npages = get_user_pages_unlocked(addr, 1, &page, flags);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage;

		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
			*writable = true;
			put_page(page);
			page = wpage;
		}
	}
	*pfn = page_to_pfn(page);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

static int kvm_try_get_pfn(kvm_pfn_t pfn)
{
	struct page *page = kvm_pfn_to_refcounted_page(pfn);

	if (!page)
		return 1;

	return get_page_unless_zero(page);
}

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool write_fault,
			       bool *writable, kvm_pfn_t *p_pfn)
{
	kvm_pfn_t pfn;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	int r;

	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
	if (r) {
		/*
		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
		 * not call the fault handler, so do it here.
		 */
		bool unlocked = false;
		r = fixup_user_fault(current->mm, addr,
				     (write_fault ? FAULT_FLAG_WRITE : 0),
				     &unlocked);
		if (unlocked)
			return -EAGAIN;
		if (r)
			return r;

		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
		if (r)
			return r;
	}

	pte = ptep_get(ptep);

	if (write_fault && !pte_write(pte)) {
		pfn = KVM_PFN_ERR_RO_FAULT;
		goto out;
	}

	if (writable)
		*writable = pte_write(pte);
	pfn = pte_pfn(pte);

	/*
	 * Get a reference here because callers of *hva_to_pfn* and
	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
	 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
	 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
	 * simply do nothing for reserved pfns.
	 *
	 * Whoever called remap_pfn_range is also going to call e.g.
	 * unmap_mapping_range before the underlying pages are freed,
	 * causing a call to our MMU notifier.
	 *
	 * Certain IO or PFNMAP mappings can be backed with valid
	 * struct pages, but be allocated without refcounting e.g.,
	 * tail pages of non-compound higher order allocations, which
	 * would then underflow the refcount when the caller does the
	 * required put_page. Don't allow those pages here.
	 */
	if (!kvm_try_get_pfn(pfn))
		r = -EFAULT;

out:
	pte_unmap_unlock(ptep, ptl);
	*p_pfn = pfn;

	return r;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, only the fast path is attempted, i.e. this function
 *	must not sleep
 * @interruptible: whether the process can be interrupted by non-fatal signals
 * @async: whether this function needs to wait for IO to complete if the
 *	host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether to allow mapping a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
		     bool *async, bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	kvm_pfn_t pfn;
	int npages, r;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
				 writable, &pfn);
	if (npages == 1)
		return pfn;
	if (npages == -EINTR)
		return KVM_PFN_ERR_SIGPENDING;

	mmap_read_lock(current->mm);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

retry:
	vma = vma_lookup(current->mm, addr);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
		if (r == -EAGAIN)
			goto retry;
		if (r < 0)
			pfn = KVM_PFN_ERR_FAULT;
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	mmap_read_unlock(current->mm);
	return pfn;
}
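
/*
 * Illustrative call-flow summary (not a code path in itself): a typical
 * lookup, e.g. via gfn_to_pfn_prot() below, resolves as
 *
 *	gfn -> hva	__gfn_to_pfn_memslot() / __gfn_to_hva_many()
 *	hva -> pfn	hva_to_pfn_fast(), pinned without sleeping
 *	hva -> pfn	hva_to_pfn_slow(), may fault pages in
 *	VM_IO/VM_PFNMAP	hva_to_pfn_remapped(), walks the PTE directly
 *
 * with the atomic variant giving up after the fast attempt and returning
 * KVM_PFN_ERR_FAULT, as seen above.
 */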

kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool interruptible, bool *async,
			       bool write_fault, bool *writable, hva_t *hva)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (hva)
		*hva = addr;

	if (addr == KVM_HVA_ERR_RO_BAD) {
		if (writable)
			*writable = false;
		return KVM_PFN_ERR_RO_FAULT;
	}

	if (kvm_is_error_hva(addr)) {
		if (writable)
			*writable = false;
		return KVM_PFN_NOSLOT;
	}

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
			  writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable)
{
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
				    NULL, write_fault, writable, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
				    NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
				    NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry = 0;

	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

/*
 * Do not use this helper unless you are absolutely certain the gfn _must_ be
 * backed by 'struct page'. A valid example is if the backing memslot is
 * controlled by KVM. Note, if the returned page is valid, its refcount has
 * been elevated by gfn_to_pfn().
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;
	kvm_pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	page = kvm_pfn_to_refcounted_page(pfn);
	if (!page)
		return KVM_ERR_PTR_BAD_PAGE;

	return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
{
	if (dirty)
		kvm_release_pfn_dirty(pfn);
	else
		kvm_release_pfn_clean(pfn);
}

int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{
	kvm_pfn_t pfn;
	void *hva = NULL;
	struct page *page = KVM_UNMAPPED_PAGE;

	if (!map)
		return -EINVAL;

	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EINVAL;

	if (pfn_valid(pfn)) {
		page = pfn_to_page(pfn);
		hva = kmap(page);
#ifdef CONFIG_HAS_IOMEM
	} else {
		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
	}

	if (!hva)
		return -EFAULT;

	map->page = page;
	map->hva = hva;
	map->pfn = pfn;
	map->gfn = gfn;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_map);

void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{
	if (!map)
		return;

	if (!map->hva)
		return;

	if (map->page != KVM_UNMAPPED_PAGE)
		kunmap(map->page);
#ifdef CONFIG_HAS_IOMEM
	else
		memunmap(map->hva);
#endif

	if (dirty)
		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);

	kvm_release_pfn(map->pfn, dirty);

	map->hva = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
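
/*
 * Sketch of the intended map/unmap pairing (illustrative, assuming a valid
 * @gfn and hypothetical offset/data/len):
 *
 *	struct kvm_host_map map;
 *
 *	if (!kvm_vcpu_map(vcpu, gfn, &map)) {
 *		memcpy(map.hva + offset, data, len);	// temporary mapping
 *		kvm_vcpu_unmap(vcpu, &map, true);	// true: mark dirty
 *	}
 *
 * kvm_vcpu_map() handles both regular memory (kmap) and, with
 * CONFIG_HAS_IOMEM, non-struct-page memory (memremap), so callers don't
 * need to distinguish the two.
 */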

static bool kvm_is_ad_tracked_page(struct page *page)
{
	/*
	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
	 * touched (e.g. set dirty) except by its owner".
	 */
	return !PageReserved(page);
}

static void kvm_set_page_dirty(struct page *page)
{
	if (kvm_is_ad_tracked_page(page))
		SetPageDirty(page);
}

static void kvm_set_page_accessed(struct page *page)
{
	if (kvm_is_ad_tracked_page(page))
		mark_page_accessed(page);
}

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_set_page_accessed(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(kvm_pfn_t pfn)
{
	struct page *page;

	if (is_error_noslot_pfn(pfn))
		return;

	page = kvm_pfn_to_refcounted_page(pfn);
	if (!page)
		return;

	kvm_release_page_clean(page);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_set_page_dirty(page);
	kvm_release_page_clean(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(kvm_pfn_t pfn)
{
	struct page *page;

	if (is_error_noslot_pfn(pfn))
		return;

	page = kvm_pfn_to_refcounted_page(pfn);
	if (!page)
		return;

	kvm_release_page_dirty(page);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

/*
 * Note, checking for an error/noslot pfn is the caller's responsibility when
 * directly marking a page dirty/accessed. Unlike the "release" helpers, the
 * "set" helpers are not to be used when the pfn might point at garbage.
 */
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
	if (WARN_ON(is_error_noslot_pfn(pfn)))
		return;

	if (pfn_valid(pfn))
		kvm_set_page_dirty(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{
	if (WARN_ON(is_error_noslot_pfn(pfn)))
		return;

	if (pfn_valid(pfn))
		kvm_set_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
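
/*
 * Worked example (illustrative): with a 4 KiB PAGE_SIZE, an access of
 * len = 5000 bytes starting at offset 3000 within a page yields a first
 * segment of 1096 bytes (the remainder of that page) and then 3904 bytes
 * from the next page; the read/write loops below advance gfn and reset
 * offset to 0 accordingly.
 */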

static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);

static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
				   void *data, int offset, unsigned long len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

static int __kvm_write_guest_page(struct kvm *kvm,
				  struct kvm_memory_slot *memslot, gfn_t gfn,
				  const void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, memslot, gfn);
	return 0;
}

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);

static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
				       struct gfn_to_hva_cache *ghc,
				       gpa_t gpa, unsigned long len)
{
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	/* Update ghc->generation before performing any error checks. */
	ghc->generation = slots->generation;

	if (start_gfn > end_gfn) {
		ghc->hva = KVM_HVA_ERR_BAD;
		return -EINVAL;
	}

	/*
	 * If the requested region crosses two memslots, we still
	 * verify that the entire region is valid here.
	 */
	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
					   &nr_pages_avail);
		if (kvm_is_error_hva(ghc->hva))
			return -EFAULT;
	}

	/* Use the slow path for cross page reads and writes. */
	if (nr_pages_needed == 1)
		ghc->hva += offset;
	else
		ghc->memslot = NULL;

	ghc->gpa = gpa;
	ghc->len = len;
	return 0;
}

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;
	gpa_t gpa = ghc->gpa + offset;

	if (WARN_ON_ONCE(len + offset > ghc->len))
		return -EINVAL;

	if (slots->generation != ghc->generation) {
		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
			return -EFAULT;
	}

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, gpa, data, len);

	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
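
/*
 * Typical cache usage (illustrative sketch, hypothetical caller state "st"):
 * initialize the cache once against a fixed gpa, then write through it
 * repeatedly; the generation check above transparently re-resolves the hva
 * if the memslots change in between.
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(st)))
 *		return -EFAULT;
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &st, sizeof(st));
 */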

int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;
	gpa_t gpa = ghc->gpa + offset;

	if (WARN_ON_ONCE(len + offset > ghc->len))
		return -EINVAL;

	if (slots->generation != ghc->generation) {
		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
			return -EFAULT;
	}

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, gpa, data, len);

	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		/* Write at most one page (seg bytes) per iteration. */
		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty_in_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

#ifdef CONFIG_HAVE_KVM_DIRTY_RING
	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
		return;

	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
#endif

	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
		u32 slot = (memslot->as_id << 16) | memslot->id;

		if (kvm->dirty_ring_size && vcpu)
			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
		else if (memslot->dirty_bitmap)
			set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}
EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;

	/*
	 * This does a lockless modification of ->real_blocked, which is fine
	 * because only current can change ->real_blocked, and all readers of
	 * ->real_blocked don't care as long as ->real_blocked is always a
	 * subset of ->blocked.
	 */
	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
}

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;

	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
	sigemptyset(&current->real_blocked);
}

static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	unsigned int old, val, grow, grow_start;

	old = val = vcpu->halt_poll_ns;
	grow_start = READ_ONCE(halt_poll_ns_grow_start);
	grow = READ_ONCE(halt_poll_ns_grow);
	if (!grow)
		goto out;

	val *= grow;
	if (val < grow_start)
		val = grow_start;

	vcpu->halt_poll_ns = val;
out:
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	unsigned int old, val, shrink, grow_start;

	old = val = vcpu->halt_poll_ns;
	shrink = READ_ONCE(halt_poll_ns_shrink);
	grow_start = READ_ONCE(halt_poll_ns_grow_start);
	if (shrink == 0)
		val = 0;
	else
		val /= shrink;

	if (val < grow_start)
		val = 0;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}
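
/*
 * Worked example (illustrative, using the module defaults): with
 * halt_poll_ns_grow = 2 and halt_poll_ns_grow_start = 10000, successive
 * grows take a vCPU's polling window 0 -> 10000 -> 20000 -> 40000 ns
 * (capped by the max elsewhere), while the default halt_poll_ns_shrink = 0
 * resets the window straight back to 0 on shrink.
 */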

static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	int ret = -EINTR;
	int idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (kvm_arch_vcpu_runnable(vcpu))
		goto out;
	if (kvm_cpu_has_pending_timer(vcpu))
		goto out;
	if (signal_pending(current))
		goto out;
	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
		goto out;

	ret = 0;
out:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

/*
 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
 * pending. This is mostly used when halting a vCPU, but may also be used
 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
 */
bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
	bool waited = false;

	vcpu->stat.generic.blocking = 1;

	preempt_disable();
	kvm_arch_vcpu_blocking(vcpu);
	prepare_to_rcuwait(wait);
	preempt_enable();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kvm_vcpu_check_block(vcpu) < 0)
			break;

		waited = true;
		schedule();
	}

	preempt_disable();
	finish_rcuwait(wait);
	kvm_arch_vcpu_unblocking(vcpu);
	preempt_enable();

	vcpu->stat.generic.blocking = 0;

	return waited;
}
3792
3793static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3794 ktime_t end, bool success)
3795{
3796 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3797 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3798
3799 ++vcpu->stat.generic.halt_attempted_poll;
3800
3801 if (success) {
3802 ++vcpu->stat.generic.halt_successful_poll;
3803
3804 if (!vcpu_valid_wakeup(vcpu))
3805 ++vcpu->stat.generic.halt_poll_invalid;
3806
3807 stats->halt_poll_success_ns += poll_ns;
3808 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3809 } else {
3810 stats->halt_poll_fail_ns += poll_ns;
3811 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3812 }
3813}
3814
3815static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3816{
3817 struct kvm *kvm = vcpu->kvm;
3818
3819 if (kvm->override_halt_poll_ns) {
3820 /*
3821 * Ensure kvm->max_halt_poll_ns is not read before
3822 * kvm->override_halt_poll_ns.
3823 *
3824 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3825 */
3826 smp_rmb();
3827 return READ_ONCE(kvm->max_halt_poll_ns);
3828 }
3829
3830 return READ_ONCE(halt_poll_ns);
3831}
3832
3833/*
3834 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
3835 * polling is enabled, busy wait for a short time before blocking to avoid the
3836 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3837 * is halted.
3838 */
3839void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3840{
3841 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3842 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3843 ktime_t start, cur, poll_end;
3844 bool waited = false;
3845 bool do_halt_poll;
3846 u64 halt_ns;
3847
3848 if (vcpu->halt_poll_ns > max_halt_poll_ns)
3849 vcpu->halt_poll_ns = max_halt_poll_ns;
3850
3851 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3852
3853 start = cur = poll_end = ktime_get();
3854 if (do_halt_poll) {
3855 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3856
3857 do {
3858 if (kvm_vcpu_check_block(vcpu) < 0)
3859 goto out;
3860 cpu_relax();
3861 poll_end = cur = ktime_get();
3862 } while (kvm_vcpu_can_poll(cur, stop));
3863 }
3864
3865 waited = kvm_vcpu_block(vcpu);
3866
3867 cur = ktime_get();
3868 if (waited) {
3869 vcpu->stat.generic.halt_wait_ns +=
3870 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3871 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3872 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3873 }
3874out:
3875 /* The total time the vCPU was "halted", including polling time. */
3876 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3877
3878 /*
3879 * Note, halt-polling is considered successful so long as the vCPU was
3880 * never actually scheduled out, i.e. even if the wake event arrived
	 * after the halt-polling loop itself, but before the full wait.
3882 */
3883 if (do_halt_poll)
3884 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3885
3886 if (halt_poll_allowed) {
3887 /* Recompute the max halt poll time in case it changed. */
3888 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3889
3890 if (!vcpu_valid_wakeup(vcpu)) {
3891 shrink_halt_poll_ns(vcpu);
3892 } else if (max_halt_poll_ns) {
3893 if (halt_ns <= vcpu->halt_poll_ns)
3894 ;
3895 /* we had a long block, shrink polling */
3896 else if (vcpu->halt_poll_ns &&
3897 halt_ns > max_halt_poll_ns)
3898 shrink_halt_poll_ns(vcpu);
3899 /* we had a short halt and our poll time is too small */
3900 else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3901 halt_ns < max_halt_poll_ns)
3902 grow_halt_poll_ns(vcpu);
3903 } else {
3904 vcpu->halt_poll_ns = 0;
3905 }
3906 }
3907
3908 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3909}
3910EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3911
3912bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3913{
3914 if (__kvm_vcpu_wake_up(vcpu)) {
3915 WRITE_ONCE(vcpu->ready, true);
3916 ++vcpu->stat.generic.halt_wakeup;
3917 return true;
3918 }
3919
3920 return false;
3921}
3922EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3923
3924#ifndef CONFIG_S390
3925/*
3926 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3927 */
3928void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3929{
3930 int me, cpu;
3931
3932 if (kvm_vcpu_wake_up(vcpu))
3933 return;
3934
3935 me = get_cpu();
3936 /*
3937 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3938 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should
3939 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3940 * within the vCPU thread itself.
3941 */
3942 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3943 if (vcpu->mode == IN_GUEST_MODE)
3944 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3945 goto out;
3946 }
3947
3948 /*
3949 * Note, the vCPU could get migrated to a different pCPU at any point
3950 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3951 * IPI to the previous pCPU. But, that's ok because the purpose of the
3952 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3953 * vCPU also requires it to leave IN_GUEST_MODE.
3954 */
3955 if (kvm_arch_vcpu_should_kick(vcpu)) {
3956 cpu = READ_ONCE(vcpu->cpu);
3957 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3958 smp_send_reschedule(cpu);
3959 }
3960out:
3961 put_cpu();
3962}
3963EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3964#endif /* !CONFIG_S390 */
3965
3966int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3967{
3968 struct pid *pid;
3969 struct task_struct *task = NULL;
3970 int ret = 0;
3971
3972 rcu_read_lock();
3973 pid = rcu_dereference(target->pid);
3974 if (pid)
3975 task = get_pid_task(pid, PIDTYPE_PID);
3976 rcu_read_unlock();
3977 if (!task)
3978 return ret;
3979 ret = yield_to(task, 1);
3980 put_task_struct(task);
3981
3982 return ret;
3983}
3984EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3985
3986/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is decided by the following
 * heuristics:
 *
 * (a) VCPU which has not done a pl-exit or had cpu relax intercepted
 * recently (a preempted lock holder), indicated by @in_spin_loop.
 * Set at the beginning and cleared at the end of the interception/PLE
 * handler.
 *
 * (b) VCPU which has done a pl-exit/cpu-relax intercept but did not get a
 * chance last time (it has most likely become eligible now since we
 * probably yielded to the lock holder in the last iteration). This is done
 * by toggling @dy_eligible each time a VCPU is checked for eligibility.
 *
 * Yielding to a recently pl-exited/cpu-relax-intercepted VCPU before
 * yielding to the preempted lock holder could result in wrong VCPU selection
 * and CPU burning. Giving priority to a potential lock holder increases lock
 * progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's data
 * without locking does no harm. It may result in trying to yield to the same
 * VCPU, failing, and continuing with the next VCPU, and so on.
4007 */
4008static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
4009{
4010#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
4011 bool eligible;
4012
4013 eligible = !vcpu->spin_loop.in_spin_loop ||
4014 vcpu->spin_loop.dy_eligible;
4015
4016 if (vcpu->spin_loop.in_spin_loop)
4017 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
4018
4019 return eligible;
4020#else
4021 return true;
4022#endif
4023}
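
/*
 * Worked example: a target vCPU with in_spin_loop == true and dy_eligible ==
 * false is rejected on the first eligibility check, but dy_eligible is
 * toggled to true, so the very next check accepts it (and toggles the flag
 * back).  This alternation is what gives a vCPU that lost out last time a
 * chance on the following round.
 */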
4024
4025/*
4026 * Unlike kvm_arch_vcpu_runnable, this function is called outside
4027 * a vcpu_load/vcpu_put pair. However, for most architectures
4028 * kvm_arch_vcpu_runnable does not require vcpu_load.
4029 */
4030bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
4031{
4032 return kvm_arch_vcpu_runnable(vcpu);
4033}
4034
4035static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
4036{
4037 if (kvm_arch_dy_runnable(vcpu))
4038 return true;
4039
4040#ifdef CONFIG_KVM_ASYNC_PF
4041 if (!list_empty_careful(&vcpu->async_pf.done))
4042 return true;
4043#endif
4044
4045 return false;
4046}
4047
4048/*
4049 * By default, simply query the target vCPU's current mode when checking if a
4050 * vCPU was preempted in kernel mode. All architectures except x86 (or more
 * specifically, except VMX) allow querying whether or not a vCPU is in kernel
4052 * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
4053 * directly for cross-vCPU checks is functionally correct and accurate.
4054 */
4055bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
4056{
4057 return kvm_arch_vcpu_in_kernel(vcpu);
4058}
4059
4060bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
4061{
4062 return false;
4063}
4064
4065void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4066{
4067 struct kvm *kvm = me->kvm;
4068 struct kvm_vcpu *vcpu;
4069 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
4070 unsigned long i;
4071 int yielded = 0;
4072 int try = 3;
4073 int pass;
4074
4075 kvm_vcpu_set_in_spin_loop(me, true);
4076 /*
4077 * We boost the priority of a VCPU that is runnable but not
4078 * currently running, because it got preempted by something
4079 * else and called schedule in __vcpu_run. Hopefully that
4080 * VCPU is holding the lock that we need and will release it.
4081 * We approximate round-robin by starting at the last boosted VCPU.
4082 */
4083 for (pass = 0; pass < 2 && !yielded && try; pass++) {
4084 kvm_for_each_vcpu(i, vcpu, kvm) {
4085 if (!pass && i <= last_boosted_vcpu) {
4086 i = last_boosted_vcpu;
4087 continue;
4088 } else if (pass && i > last_boosted_vcpu)
4089 break;
4090 if (!READ_ONCE(vcpu->ready))
4091 continue;
4092 if (vcpu == me)
4093 continue;
4094 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4095 continue;
4096
4097 /*
4098 * Treat the target vCPU as being in-kernel if it has a
4099 * pending interrupt, as the vCPU trying to yield may
4100 * be spinning waiting on IPI delivery, i.e. the target
4101 * vCPU is in-kernel for the purposes of directed yield.
4102 */
4103 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4104 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4105 !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4106 continue;
4107 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4108 continue;
4109
4110 yielded = kvm_vcpu_yield_to(vcpu);
4111 if (yielded > 0) {
4112 kvm->last_boosted_vcpu = i;
4113 break;
4114 } else if (yielded < 0) {
4115 try--;
4116 if (!try)
4117 break;
4118 }
4119 }
4120 }
4121 kvm_vcpu_set_in_spin_loop(me, false);
4122
4123 /* Ensure vcpu is not eligible during next spinloop */
4124 kvm_vcpu_set_dy_eligible(me, false);
4125}
4126EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
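
/*
 * Worked example of the round-robin approximation above: with four vCPUs
 * and last_boosted_vcpu == 2, pass 0 only considers vCPU 3 (indices above
 * last_boosted_vcpu), and pass 1 wraps around to consider vCPUs 0, 1 and 2
 * before the scan stops.
 */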
4127
4128static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4129{
4130#ifdef CONFIG_HAVE_KVM_DIRTY_RING
4131 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4132 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4133 kvm->dirty_ring_size / PAGE_SIZE);
4134#else
4135 return false;
4136#endif
4137}
4138
4139static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4140{
4141 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4142 struct page *page;
4143
4144 if (vmf->pgoff == 0)
4145 page = virt_to_page(vcpu->run);
4146#ifdef CONFIG_X86
4147 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4148 page = virt_to_page(vcpu->arch.pio_data);
4149#endif
4150#ifdef CONFIG_KVM_MMIO
4151 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4152 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4153#endif
4154 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4155 page = kvm_dirty_ring_get_page(
4156 &vcpu->dirty_ring,
4157 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4158 else
4159 return kvm_arch_vcpu_fault(vcpu, vmf);
4160 get_page(page);
4161 vmf->page = page;
4162 return 0;
4163}
4164
4165static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4166 .fault = kvm_vcpu_fault,
4167};
4168
4169static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4170{
4171 struct kvm_vcpu *vcpu = file->private_data;
4172 unsigned long pages = vma_pages(vma);
4173
4174 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4175 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4176 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4177 return -EINVAL;
4178
4179 vma->vm_ops = &kvm_vcpu_vm_ops;
4180 return 0;
4181}
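
/*
 * Illustrative userspace sketch (not kernel code; assumes a valid kvm_fd and
 * vm_fd): the kvm_run structure of a vCPU is reached by mmapping the vCPU fd
 * at page offset 0, sized per KVM_GET_VCPU_MMAP_SIZE (see kvm_dev_ioctl()
 * below):
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 * Note the check above: dirty-ring pages must be mapped shared and
 * non-executable.
 */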
4182
4183static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4184{
4185 struct kvm_vcpu *vcpu = filp->private_data;
4186
4187 kvm_put_kvm(vcpu->kvm);
4188 return 0;
4189}
4190
4191static struct file_operations kvm_vcpu_fops = {
4192 .release = kvm_vcpu_release,
4193 .unlocked_ioctl = kvm_vcpu_ioctl,
4194 .mmap = kvm_vcpu_mmap,
4195 .llseek = noop_llseek,
4196 KVM_COMPAT(kvm_vcpu_compat_ioctl),
4197};
4198
4199/*
4200 * Allocates an inode for the vcpu.
4201 */
4202static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4203{
4204 char name[8 + 1 + ITOA_MAX_LEN + 1];
4205
4206 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4207 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4208}
4209
4210#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4211static int vcpu_get_pid(void *data, u64 *val)
4212{
4213 struct kvm_vcpu *vcpu = data;
4214
4215 rcu_read_lock();
4216 *val = pid_nr(rcu_dereference(vcpu->pid));
4217 rcu_read_unlock();
4218 return 0;
4219}
4220
4221DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4222
4223static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4224{
4225 struct dentry *debugfs_dentry;
4226 char dir_name[ITOA_MAX_LEN * 2];
4227
4228 if (!debugfs_initialized())
4229 return;
4230
4231 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4232 debugfs_dentry = debugfs_create_dir(dir_name,
4233 vcpu->kvm->debugfs_dentry);
4234 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4235 &vcpu_get_pid_fops);
4236
4237 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4238}
4239#endif
4240
4241/*
4242 * Creates some virtual cpus. Good luck creating more than one.
4243 */
4244static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
4245{
4246 int r;
4247 struct kvm_vcpu *vcpu;
4248 struct page *page;
4249
4250 if (id >= KVM_MAX_VCPU_IDS)
4251 return -EINVAL;
4252
4253 mutex_lock(&kvm->lock);
4254 if (kvm->created_vcpus >= kvm->max_vcpus) {
4255 mutex_unlock(&kvm->lock);
4256 return -EINVAL;
4257 }
4258
4259 r = kvm_arch_vcpu_precreate(kvm, id);
4260 if (r) {
4261 mutex_unlock(&kvm->lock);
4262 return r;
4263 }
4264
4265 kvm->created_vcpus++;
4266 mutex_unlock(&kvm->lock);
4267
4268 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4269 if (!vcpu) {
4270 r = -ENOMEM;
4271 goto vcpu_decrement;
4272 }
4273
4274 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4275 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4276 if (!page) {
4277 r = -ENOMEM;
4278 goto vcpu_free;
4279 }
4280 vcpu->run = page_address(page);
4281
4282 kvm_vcpu_init(vcpu, kvm, id);
4283
4284 r = kvm_arch_vcpu_create(vcpu);
4285 if (r)
4286 goto vcpu_free_run_page;
4287
4288 if (kvm->dirty_ring_size) {
4289 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
4290 id, kvm->dirty_ring_size);
4291 if (r)
4292 goto arch_vcpu_destroy;
4293 }
4294
4295 mutex_lock(&kvm->lock);
4296
4297#ifdef CONFIG_LOCKDEP
4298 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
4299 mutex_lock(&vcpu->mutex);
4300 mutex_unlock(&vcpu->mutex);
4301#endif
4302
4303 if (kvm_get_vcpu_by_id(kvm, id)) {
4304 r = -EEXIST;
4305 goto unlock_vcpu_destroy;
4306 }
4307
4308 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4309 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4310 if (r)
4311 goto unlock_vcpu_destroy;
4312
4313 /* Now it's all set up, let userspace reach it */
4314 kvm_get_kvm(kvm);
4315 r = create_vcpu_fd(vcpu);
4316 if (r < 0)
4317 goto kvm_put_xa_release;
4318
4319 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4320 r = -EINVAL;
4321 goto kvm_put_xa_release;
4322 }
4323
4324 /*
	 * Pairs with smp_rmb() in kvm_get_vcpu().  Store the vcpu
	 * pointer before kvm->online_vcpus' incremented value.
4327 */
4328 smp_wmb();
4329 atomic_inc(&kvm->online_vcpus);
4330
4331 mutex_unlock(&kvm->lock);
4332 kvm_arch_vcpu_postcreate(vcpu);
4333 kvm_create_vcpu_debugfs(vcpu);
4334 return r;
4335
4336kvm_put_xa_release:
4337 kvm_put_kvm_no_destroy(kvm);
4338 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4339unlock_vcpu_destroy:
4340 mutex_unlock(&kvm->lock);
4341 kvm_dirty_ring_free(&vcpu->dirty_ring);
4342arch_vcpu_destroy:
4343 kvm_arch_vcpu_destroy(vcpu);
4344vcpu_free_run_page:
4345 free_page((unsigned long)vcpu->run);
4346vcpu_free:
4347 kmem_cache_free(kvm_vcpu_cache, vcpu);
4348vcpu_decrement:
4349 mutex_lock(&kvm->lock);
4350 kvm->created_vcpus--;
4351 mutex_unlock(&kvm->lock);
4352 return r;
4353}
4354
4355static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4356{
4357 if (sigset) {
4358 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4359 vcpu->sigset_active = 1;
4360 vcpu->sigset = *sigset;
4361 } else
4362 vcpu->sigset_active = 0;
4363 return 0;
4364}
4365
4366static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4367 size_t size, loff_t *offset)
4368{
4369 struct kvm_vcpu *vcpu = file->private_data;
4370
4371 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4372 &kvm_vcpu_stats_desc[0], &vcpu->stat,
4373 sizeof(vcpu->stat), user_buffer, size, offset);
4374}
4375
4376static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4377{
4378 struct kvm_vcpu *vcpu = file->private_data;
4379
4380 kvm_put_kvm(vcpu->kvm);
4381 return 0;
4382}
4383
4384static const struct file_operations kvm_vcpu_stats_fops = {
4385 .owner = THIS_MODULE,
4386 .read = kvm_vcpu_stats_read,
4387 .release = kvm_vcpu_stats_release,
4388 .llseek = noop_llseek,
4389};
4390
4391static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4392{
4393 int fd;
4394 struct file *file;
4395 char name[15 + ITOA_MAX_LEN + 1];
4396
4397 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4398
4399 fd = get_unused_fd_flags(O_CLOEXEC);
4400 if (fd < 0)
4401 return fd;
4402
4403 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4404 if (IS_ERR(file)) {
4405 put_unused_fd(fd);
4406 return PTR_ERR(file);
4407 }
4408
4409 kvm_get_kvm(vcpu->kvm);
4410
4411 file->f_mode |= FMODE_PREAD;
4412 fd_install(fd, file);
4413
4414 return fd;
4415}
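
/*
 * Illustrative userspace sketch (not kernel code): the returned fd is
 * read-only and supports pread(), so the header can be re-read at any
 * offset and the stats data polled at its advertised location (buf and
 * buf_len are placeholders sized from the stats descriptors):
 *
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 *	pread(stats_fd, buf, buf_len, hdr.data_offset);
 */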
4416
4417static long kvm_vcpu_ioctl(struct file *filp,
4418 unsigned int ioctl, unsigned long arg)
4419{
4420 struct kvm_vcpu *vcpu = filp->private_data;
4421 void __user *argp = (void __user *)arg;
4422 int r;
4423 struct kvm_fpu *fpu = NULL;
4424 struct kvm_sregs *kvm_sregs = NULL;
4425
4426 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4427 return -EIO;
4428
4429 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4430 return -EINVAL;
4431
4432 /*
4433 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4434 * execution; mutex_lock() would break them.
4435 */
4436 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4437 if (r != -ENOIOCTLCMD)
4438 return r;
4439
4440 if (mutex_lock_killable(&vcpu->mutex))
4441 return -EINTR;
4442 switch (ioctl) {
4443 case KVM_RUN: {
4444 struct pid *oldpid;
4445 r = -EINVAL;
4446 if (arg)
4447 goto out;
4448 oldpid = rcu_access_pointer(vcpu->pid);
4449 if (unlikely(oldpid != task_pid(current))) {
4450 /* The thread running this VCPU changed. */
4451 struct pid *newpid;
4452
4453 r = kvm_arch_vcpu_run_pid_change(vcpu);
4454 if (r)
4455 break;
4456
4457 newpid = get_task_pid(current, PIDTYPE_PID);
4458 rcu_assign_pointer(vcpu->pid, newpid);
4459 if (oldpid)
4460 synchronize_rcu();
4461 put_pid(oldpid);
4462 }
4463 r = kvm_arch_vcpu_ioctl_run(vcpu);
4464 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4465 break;
4466 }
4467 case KVM_GET_REGS: {
4468 struct kvm_regs *kvm_regs;
4469
4470 r = -ENOMEM;
4471 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
4472 if (!kvm_regs)
4473 goto out;
4474 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4475 if (r)
4476 goto out_free1;
4477 r = -EFAULT;
4478 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4479 goto out_free1;
4480 r = 0;
4481out_free1:
4482 kfree(kvm_regs);
4483 break;
4484 }
4485 case KVM_SET_REGS: {
4486 struct kvm_regs *kvm_regs;
4487
4488 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4489 if (IS_ERR(kvm_regs)) {
4490 r = PTR_ERR(kvm_regs);
4491 goto out;
4492 }
4493 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4494 kfree(kvm_regs);
4495 break;
4496 }
4497 case KVM_GET_SREGS: {
4498 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
4499 GFP_KERNEL_ACCOUNT);
4500 r = -ENOMEM;
4501 if (!kvm_sregs)
4502 goto out;
4503 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4504 if (r)
4505 goto out;
4506 r = -EFAULT;
4507 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4508 goto out;
4509 r = 0;
4510 break;
4511 }
4512 case KVM_SET_SREGS: {
4513 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4514 if (IS_ERR(kvm_sregs)) {
4515 r = PTR_ERR(kvm_sregs);
4516 kvm_sregs = NULL;
4517 goto out;
4518 }
4519 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4520 break;
4521 }
4522 case KVM_GET_MP_STATE: {
4523 struct kvm_mp_state mp_state;
4524
4525 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4526 if (r)
4527 goto out;
4528 r = -EFAULT;
4529 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4530 goto out;
4531 r = 0;
4532 break;
4533 }
4534 case KVM_SET_MP_STATE: {
4535 struct kvm_mp_state mp_state;
4536
4537 r = -EFAULT;
4538 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4539 goto out;
4540 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4541 break;
4542 }
4543 case KVM_TRANSLATE: {
4544 struct kvm_translation tr;
4545
4546 r = -EFAULT;
4547 if (copy_from_user(&tr, argp, sizeof(tr)))
4548 goto out;
4549 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4550 if (r)
4551 goto out;
4552 r = -EFAULT;
4553 if (copy_to_user(argp, &tr, sizeof(tr)))
4554 goto out;
4555 r = 0;
4556 break;
4557 }
4558 case KVM_SET_GUEST_DEBUG: {
4559 struct kvm_guest_debug dbg;
4560
4561 r = -EFAULT;
4562 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4563 goto out;
4564 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4565 break;
4566 }
4567 case KVM_SET_SIGNAL_MASK: {
4568 struct kvm_signal_mask __user *sigmask_arg = argp;
4569 struct kvm_signal_mask kvm_sigmask;
4570 sigset_t sigset, *p;
4571
4572 p = NULL;
4573 if (argp) {
4574 r = -EFAULT;
4575 if (copy_from_user(&kvm_sigmask, argp,
4576 sizeof(kvm_sigmask)))
4577 goto out;
4578 r = -EINVAL;
4579 if (kvm_sigmask.len != sizeof(sigset))
4580 goto out;
4581 r = -EFAULT;
4582 if (copy_from_user(&sigset, sigmask_arg->sigset,
4583 sizeof(sigset)))
4584 goto out;
4585 p = &sigset;
4586 }
4587 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4588 break;
4589 }
4590 case KVM_GET_FPU: {
4591 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4592 r = -ENOMEM;
4593 if (!fpu)
4594 goto out;
4595 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4596 if (r)
4597 goto out;
4598 r = -EFAULT;
4599 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4600 goto out;
4601 r = 0;
4602 break;
4603 }
4604 case KVM_SET_FPU: {
4605 fpu = memdup_user(argp, sizeof(*fpu));
4606 if (IS_ERR(fpu)) {
4607 r = PTR_ERR(fpu);
4608 fpu = NULL;
4609 goto out;
4610 }
4611 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4612 break;
4613 }
4614 case KVM_GET_STATS_FD: {
4615 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4616 break;
4617 }
4618 default:
4619 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4620 }
4621out:
4622 mutex_unlock(&vcpu->mutex);
4623 kfree(fpu);
4624 kfree(kvm_sregs);
4625 return r;
4626}
4627
4628#ifdef CONFIG_KVM_COMPAT
4629static long kvm_vcpu_compat_ioctl(struct file *filp,
4630 unsigned int ioctl, unsigned long arg)
4631{
4632 struct kvm_vcpu *vcpu = filp->private_data;
4633 void __user *argp = compat_ptr(arg);
4634 int r;
4635
4636 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4637 return -EIO;
4638
4639 switch (ioctl) {
4640 case KVM_SET_SIGNAL_MASK: {
4641 struct kvm_signal_mask __user *sigmask_arg = argp;
4642 struct kvm_signal_mask kvm_sigmask;
4643 sigset_t sigset;
4644
4645 if (argp) {
4646 r = -EFAULT;
4647 if (copy_from_user(&kvm_sigmask, argp,
4648 sizeof(kvm_sigmask)))
4649 goto out;
4650 r = -EINVAL;
4651 if (kvm_sigmask.len != sizeof(compat_sigset_t))
4652 goto out;
4653 r = -EFAULT;
4654 if (get_compat_sigset(&sigset,
4655 (compat_sigset_t __user *)sigmask_arg->sigset))
4656 goto out;
4657 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4658 } else
4659 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4660 break;
4661 }
4662 default:
4663 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4664 }
4665
4666out:
4667 return r;
4668}
4669#endif
4670
4671static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4672{
4673 struct kvm_device *dev = filp->private_data;
4674
4675 if (dev->ops->mmap)
4676 return dev->ops->mmap(dev, vma);
4677
4678 return -ENODEV;
4679}
4680
4681static int kvm_device_ioctl_attr(struct kvm_device *dev,
4682 int (*accessor)(struct kvm_device *dev,
4683 struct kvm_device_attr *attr),
4684 unsigned long arg)
4685{
4686 struct kvm_device_attr attr;
4687
4688 if (!accessor)
4689 return -EPERM;
4690
4691 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4692 return -EFAULT;
4693
4694 return accessor(dev, &attr);
4695}
4696
4697static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4698 unsigned long arg)
4699{
4700 struct kvm_device *dev = filp->private_data;
4701
4702 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4703 return -EIO;
4704
4705 switch (ioctl) {
4706 case KVM_SET_DEVICE_ATTR:
4707 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4708 case KVM_GET_DEVICE_ATTR:
4709 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4710 case KVM_HAS_DEVICE_ATTR:
4711 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4712 default:
4713 if (dev->ops->ioctl)
4714 return dev->ops->ioctl(dev, ioctl, arg);
4715
4716 return -ENOTTY;
4717 }
4718}
4719
4720static int kvm_device_release(struct inode *inode, struct file *filp)
4721{
4722 struct kvm_device *dev = filp->private_data;
4723 struct kvm *kvm = dev->kvm;
4724
4725 if (dev->ops->release) {
4726 mutex_lock(&kvm->lock);
4727 list_del(&dev->vm_node);
4728 dev->ops->release(dev);
4729 mutex_unlock(&kvm->lock);
4730 }
4731
4732 kvm_put_kvm(kvm);
4733 return 0;
4734}
4735
4736static struct file_operations kvm_device_fops = {
4737 .unlocked_ioctl = kvm_device_ioctl,
4738 .release = kvm_device_release,
4739 KVM_COMPAT(kvm_device_ioctl),
4740 .mmap = kvm_device_mmap,
4741};
4742
4743struct kvm_device *kvm_device_from_filp(struct file *filp)
4744{
4745 if (filp->f_op != &kvm_device_fops)
4746 return NULL;
4747
4748 return filp->private_data;
4749}
4750
4751static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4752#ifdef CONFIG_KVM_MPIC
4753 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4754 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4755#endif
4756};
4757
4758int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4759{
4760 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4761 return -ENOSPC;
4762
4763 if (kvm_device_ops_table[type] != NULL)
4764 return -EEXIST;
4765
4766 kvm_device_ops_table[type] = ops;
4767 return 0;
4768}
4769
4770void kvm_unregister_device_ops(u32 type)
4771{
4772 if (kvm_device_ops_table[type] != NULL)
4773 kvm_device_ops_table[type] = NULL;
4774}
4775
4776static int kvm_ioctl_create_device(struct kvm *kvm,
4777 struct kvm_create_device *cd)
4778{
4779 const struct kvm_device_ops *ops;
4780 struct kvm_device *dev;
4781 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4782 int type;
4783 int ret;
4784
4785 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4786 return -ENODEV;
4787
4788 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4789 ops = kvm_device_ops_table[type];
4790 if (ops == NULL)
4791 return -ENODEV;
4792
4793 if (test)
4794 return 0;
4795
4796 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4797 if (!dev)
4798 return -ENOMEM;
4799
4800 dev->ops = ops;
4801 dev->kvm = kvm;
4802
4803 mutex_lock(&kvm->lock);
4804 ret = ops->create(dev, type);
4805 if (ret < 0) {
4806 mutex_unlock(&kvm->lock);
4807 kfree(dev);
4808 return ret;
4809 }
4810 list_add(&dev->vm_node, &kvm->devices);
4811 mutex_unlock(&kvm->lock);
4812
4813 if (ops->init)
4814 ops->init(dev);
4815
4816 kvm_get_kvm(kvm);
4817 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4818 if (ret < 0) {
4819 kvm_put_kvm_no_destroy(kvm);
4820 mutex_lock(&kvm->lock);
4821 list_del(&dev->vm_node);
4822 if (ops->release)
4823 ops->release(dev);
4824 mutex_unlock(&kvm->lock);
4825 if (ops->destroy)
4826 ops->destroy(dev);
4827 return ret;
4828 }
4829
4830 cd->fd = ret;
4831 return 0;
4832}
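
/*
 * Illustrative userspace sketch (not kernel code): KVM_CREATE_DEVICE_TEST
 * probes whether a device type is supported without instantiating anything,
 * e.g. using the VFIO type as an arbitrary example:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_VFIO,
 *		.flags = KVM_CREATE_DEVICE_TEST,
 *	};
 *	if (!ioctl(vm_fd, KVM_CREATE_DEVICE, &cd)) {
 *		cd.flags = 0;
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd now valid
 *	}
 */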
4833
4834static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4835{
4836 switch (arg) {
4837 case KVM_CAP_USER_MEMORY:
4838 case KVM_CAP_USER_MEMORY2:
4839 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4840 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4841 case KVM_CAP_INTERNAL_ERROR_DATA:
4842#ifdef CONFIG_HAVE_KVM_MSI
4843 case KVM_CAP_SIGNAL_MSI:
4844#endif
4845#ifdef CONFIG_HAVE_KVM_IRQCHIP
4846 case KVM_CAP_IRQFD:
4847#endif
4848 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4849 case KVM_CAP_CHECK_EXTENSION_VM:
4850 case KVM_CAP_ENABLE_CAP_VM:
4851 case KVM_CAP_HALT_POLL:
4852 return 1;
4853#ifdef CONFIG_KVM_MMIO
4854 case KVM_CAP_COALESCED_MMIO:
4855 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4856 case KVM_CAP_COALESCED_PIO:
4857 return 1;
4858#endif
4859#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4860 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4861 return KVM_DIRTY_LOG_MANUAL_CAPS;
4862#endif
4863#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4864 case KVM_CAP_IRQ_ROUTING:
4865 return KVM_MAX_IRQ_ROUTES;
4866#endif
4867#if KVM_MAX_NR_ADDRESS_SPACES > 1
4868 case KVM_CAP_MULTI_ADDRESS_SPACE:
4869 if (kvm)
4870 return kvm_arch_nr_memslot_as_ids(kvm);
4871 return KVM_MAX_NR_ADDRESS_SPACES;
4872#endif
4873 case KVM_CAP_NR_MEMSLOTS:
4874 return KVM_USER_MEM_SLOTS;
4875 case KVM_CAP_DIRTY_LOG_RING:
4876#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4877 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4878#else
4879 return 0;
4880#endif
4881 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4882#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4883 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4884#else
4885 return 0;
4886#endif
4887#ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4888 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4889#endif
4890 case KVM_CAP_BINARY_STATS_FD:
4891 case KVM_CAP_SYSTEM_EVENT_DATA:
4892 case KVM_CAP_DEVICE_CTRL:
4893 return 1;
4894#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4895 case KVM_CAP_MEMORY_ATTRIBUTES:
4896 return kvm_supported_mem_attributes(kvm);
4897#endif
4898#ifdef CONFIG_KVM_PRIVATE_MEM
4899 case KVM_CAP_GUEST_MEMFD:
4900 return !kvm || kvm_arch_has_private_mem(kvm);
4901#endif
4902 default:
4903 break;
4904 }
4905 return kvm_vm_ioctl_check_extension(kvm, arg);
4906}
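
/*
 * Illustrative userspace sketch (not kernel code): the capabilities handled
 * above can be queried on the system fd, and also on a VM fd thanks to
 * KVM_CAP_CHECK_EXTENSION_VM:
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) > 0)
 *		// generic memslot support is present
 */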
4907
4908static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4909{
4910 int r;
4911
4912 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4913 return -EINVAL;
4914
	/* The size should be a power of 2 */
4916 if (!size || (size & (size - 1)))
4917 return -EINVAL;
4918
	/* Must be big enough to hold the reserved entries, and at least a page */
4920 if (size < kvm_dirty_ring_get_rsvd_entries() *
4921 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4922 return -EINVAL;
4923
4924 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4925 sizeof(struct kvm_dirty_gfn))
4926 return -E2BIG;
4927
	/* We only allow the size to be set once */
4929 if (kvm->dirty_ring_size)
4930 return -EINVAL;
4931
4932 mutex_lock(&kvm->lock);
4933
4934 if (kvm->created_vcpus) {
		/* The size cannot be changed after vCPUs have been created */
4936 r = -EINVAL;
4937 } else {
4938 kvm->dirty_ring_size = size;
4939 r = 0;
4940 }
4941
4942 mutex_unlock(&kvm->lock);
4943 return r;
4944}
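
/*
 * Worked example of the size checks above, assuming 4KiB pages and
 * sizeof(struct kvm_dirty_gfn) == 16: a request of 65536 bytes is a power
 * of two, is at least one page, holds 4096 entries, and is below the
 * KVM_DIRTY_RING_MAX_ENTRIES bound, so it is accepted provided no vCPU has
 * been created yet.
 */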
4945
4946static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4947{
4948 unsigned long i;
4949 struct kvm_vcpu *vcpu;
4950 int cleared = 0;
4951
4952 if (!kvm->dirty_ring_size)
4953 return -EINVAL;
4954
4955 mutex_lock(&kvm->slots_lock);
4956
4957 kvm_for_each_vcpu(i, vcpu, kvm)
4958 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4959
4960 mutex_unlock(&kvm->slots_lock);
4961
4962 if (cleared)
4963 kvm_flush_remote_tlbs(kvm);
4964
4965 return cleared;
4966}
4967
4968int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4969 struct kvm_enable_cap *cap)
4970{
4971 return -EINVAL;
4972}
4973
4974bool kvm_are_all_memslots_empty(struct kvm *kvm)
4975{
4976 int i;
4977
4978 lockdep_assert_held(&kvm->slots_lock);
4979
4980 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
4981 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4982 return false;
4983 }
4984
4985 return true;
4986}
4987EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
4988
4989static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4990 struct kvm_enable_cap *cap)
4991{
4992 switch (cap->cap) {
4993#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4994 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4995 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4996
4997 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4998 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4999
5000 if (cap->flags || (cap->args[0] & ~allowed_options))
5001 return -EINVAL;
5002 kvm->manual_dirty_log_protect = cap->args[0];
5003 return 0;
5004 }
5005#endif
5006 case KVM_CAP_HALT_POLL: {
5007 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5008 return -EINVAL;
5009
5010 kvm->max_halt_poll_ns = cap->args[0];
5011
5012 /*
5013 * Ensure kvm->override_halt_poll_ns does not become visible
5014 * before kvm->max_halt_poll_ns.
5015 *
5016 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5017 */
5018 smp_wmb();
5019 kvm->override_halt_poll_ns = true;
5020
5021 return 0;
5022 }
5023 case KVM_CAP_DIRTY_LOG_RING:
5024 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5025 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5026 return -EINVAL;
5027
5028 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5029 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5030 int r = -EINVAL;
5031
5032 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5033 !kvm->dirty_ring_size || cap->flags)
5034 return r;
5035
5036 mutex_lock(&kvm->slots_lock);
5037
5038 /*
5039 * For simplicity, allow enabling ring+bitmap if and only if
5040 * there are no memslots, e.g. to ensure all memslots allocate
5041 * a bitmap after the capability is enabled.
5042 */
5043 if (kvm_are_all_memslots_empty(kvm)) {
5044 kvm->dirty_ring_with_bitmap = true;
5045 r = 0;
5046 }
5047
5048 mutex_unlock(&kvm->slots_lock);
5049
5050 return r;
5051 }
5052 default:
5053 return kvm_vm_ioctl_enable_cap(kvm, cap);
5054 }
5055}
5056
5057static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5058 size_t size, loff_t *offset)
5059{
5060 struct kvm *kvm = file->private_data;
5061
5062 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5063 &kvm_vm_stats_desc[0], &kvm->stat,
5064 sizeof(kvm->stat), user_buffer, size, offset);
5065}
5066
5067static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5068{
5069 struct kvm *kvm = file->private_data;
5070
5071 kvm_put_kvm(kvm);
5072 return 0;
5073}
5074
5075static const struct file_operations kvm_vm_stats_fops = {
5076 .owner = THIS_MODULE,
5077 .read = kvm_vm_stats_read,
5078 .release = kvm_vm_stats_release,
5079 .llseek = noop_llseek,
5080};
5081
5082static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5083{
5084 int fd;
5085 struct file *file;
5086
5087 fd = get_unused_fd_flags(O_CLOEXEC);
5088 if (fd < 0)
5089 return fd;
5090
5091 file = anon_inode_getfile("kvm-vm-stats",
5092 &kvm_vm_stats_fops, kvm, O_RDONLY);
5093 if (IS_ERR(file)) {
5094 put_unused_fd(fd);
5095 return PTR_ERR(file);
5096 }
5097
5098 kvm_get_kvm(kvm);
5099
5100 file->f_mode |= FMODE_PREAD;
5101 fd_install(fd, file);
5102
5103 return fd;
5104}
5105
5106#define SANITY_CHECK_MEM_REGION_FIELD(field) \
5107do { \
5108 BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \
5109 offsetof(struct kvm_userspace_memory_region2, field)); \
5110 BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
5111 sizeof_field(struct kvm_userspace_memory_region2, field)); \
5112} while (0)
5113
5114static long kvm_vm_ioctl(struct file *filp,
5115 unsigned int ioctl, unsigned long arg)
5116{
5117 struct kvm *kvm = filp->private_data;
5118 void __user *argp = (void __user *)arg;
5119 int r;
5120
5121 if (kvm->mm != current->mm || kvm->vm_dead)
5122 return -EIO;
5123 switch (ioctl) {
5124 case KVM_CREATE_VCPU:
5125 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5126 break;
5127 case KVM_ENABLE_CAP: {
5128 struct kvm_enable_cap cap;
5129
5130 r = -EFAULT;
5131 if (copy_from_user(&cap, argp, sizeof(cap)))
5132 goto out;
5133 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5134 break;
5135 }
5136 case KVM_SET_USER_MEMORY_REGION2:
5137 case KVM_SET_USER_MEMORY_REGION: {
5138 struct kvm_userspace_memory_region2 mem;
5139 unsigned long size;
5140
5141 if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5142 /*
5143 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5144 * accessed, but avoid leaking kernel memory in case of a bug.
5145 */
5146 memset(&mem, 0, sizeof(mem));
5147 size = sizeof(struct kvm_userspace_memory_region);
5148 } else {
5149 size = sizeof(struct kvm_userspace_memory_region2);
5150 }
5151
5152 /* Ensure the common parts of the two structs are identical. */
5153 SANITY_CHECK_MEM_REGION_FIELD(slot);
5154 SANITY_CHECK_MEM_REGION_FIELD(flags);
5155 SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5156 SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5157 SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5158
5159 r = -EFAULT;
5160 if (copy_from_user(&mem, argp, size))
5161 goto out;
5162
5163 r = -EINVAL;
5164 if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5165 (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5166 goto out;
5167
5168 r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5169 break;
5170 }
5171 case KVM_GET_DIRTY_LOG: {
5172 struct kvm_dirty_log log;
5173
5174 r = -EFAULT;
5175 if (copy_from_user(&log, argp, sizeof(log)))
5176 goto out;
5177 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5178 break;
5179 }
5180#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5181 case KVM_CLEAR_DIRTY_LOG: {
5182 struct kvm_clear_dirty_log log;
5183
5184 r = -EFAULT;
5185 if (copy_from_user(&log, argp, sizeof(log)))
5186 goto out;
5187 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5188 break;
5189 }
5190#endif
5191#ifdef CONFIG_KVM_MMIO
5192 case KVM_REGISTER_COALESCED_MMIO: {
5193 struct kvm_coalesced_mmio_zone zone;
5194
5195 r = -EFAULT;
5196 if (copy_from_user(&zone, argp, sizeof(zone)))
5197 goto out;
5198 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5199 break;
5200 }
5201 case KVM_UNREGISTER_COALESCED_MMIO: {
5202 struct kvm_coalesced_mmio_zone zone;
5203
5204 r = -EFAULT;
5205 if (copy_from_user(&zone, argp, sizeof(zone)))
5206 goto out;
5207 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5208 break;
5209 }
5210#endif
5211 case KVM_IRQFD: {
5212 struct kvm_irqfd data;
5213
5214 r = -EFAULT;
5215 if (copy_from_user(&data, argp, sizeof(data)))
5216 goto out;
5217 r = kvm_irqfd(kvm, &data);
5218 break;
5219 }
5220 case KVM_IOEVENTFD: {
5221 struct kvm_ioeventfd data;
5222
5223 r = -EFAULT;
5224 if (copy_from_user(&data, argp, sizeof(data)))
5225 goto out;
5226 r = kvm_ioeventfd(kvm, &data);
5227 break;
5228 }
5229#ifdef CONFIG_HAVE_KVM_MSI
5230 case KVM_SIGNAL_MSI: {
5231 struct kvm_msi msi;
5232
5233 r = -EFAULT;
5234 if (copy_from_user(&msi, argp, sizeof(msi)))
5235 goto out;
5236 r = kvm_send_userspace_msi(kvm, &msi);
5237 break;
5238 }
5239#endif
5240#ifdef __KVM_HAVE_IRQ_LINE
5241 case KVM_IRQ_LINE_STATUS:
5242 case KVM_IRQ_LINE: {
5243 struct kvm_irq_level irq_event;
5244
5245 r = -EFAULT;
5246 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5247 goto out;
5248
5249 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5250 ioctl == KVM_IRQ_LINE_STATUS);
5251 if (r)
5252 goto out;
5253
5254 r = -EFAULT;
5255 if (ioctl == KVM_IRQ_LINE_STATUS) {
5256 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5257 goto out;
5258 }
5259
5260 r = 0;
5261 break;
5262 }
5263#endif
5264#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5265 case KVM_SET_GSI_ROUTING: {
5266 struct kvm_irq_routing routing;
5267 struct kvm_irq_routing __user *urouting;
5268 struct kvm_irq_routing_entry *entries = NULL;
5269
5270 r = -EFAULT;
5271 if (copy_from_user(&routing, argp, sizeof(routing)))
5272 goto out;
5273 r = -EINVAL;
5274 if (!kvm_arch_can_set_irq_routing(kvm))
5275 goto out;
5276 if (routing.nr > KVM_MAX_IRQ_ROUTES)
5277 goto out;
5278 if (routing.flags)
5279 goto out;
5280 if (routing.nr) {
5281 urouting = argp;
5282 entries = vmemdup_array_user(urouting->entries,
5283 routing.nr, sizeof(*entries));
5284 if (IS_ERR(entries)) {
5285 r = PTR_ERR(entries);
5286 goto out;
5287 }
5288 }
5289 r = kvm_set_irq_routing(kvm, entries, routing.nr,
5290 routing.flags);
5291 kvfree(entries);
5292 break;
5293 }
5294#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5295#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5296 case KVM_SET_MEMORY_ATTRIBUTES: {
5297 struct kvm_memory_attributes attrs;
5298
5299 r = -EFAULT;
5300 if (copy_from_user(&attrs, argp, sizeof(attrs)))
5301 goto out;
5302
5303 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5304 break;
5305 }
5306#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5307 case KVM_CREATE_DEVICE: {
5308 struct kvm_create_device cd;
5309
5310 r = -EFAULT;
5311 if (copy_from_user(&cd, argp, sizeof(cd)))
5312 goto out;
5313
5314 r = kvm_ioctl_create_device(kvm, &cd);
5315 if (r)
5316 goto out;
5317
5318 r = -EFAULT;
5319 if (copy_to_user(argp, &cd, sizeof(cd)))
5320 goto out;
5321
5322 r = 0;
5323 break;
5324 }
5325 case KVM_CHECK_EXTENSION:
5326 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5327 break;
5328 case KVM_RESET_DIRTY_RINGS:
5329 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5330 break;
5331 case KVM_GET_STATS_FD:
5332 r = kvm_vm_ioctl_get_stats_fd(kvm);
5333 break;
5334#ifdef CONFIG_KVM_PRIVATE_MEM
5335 case KVM_CREATE_GUEST_MEMFD: {
5336 struct kvm_create_guest_memfd guest_memfd;
5337
5338 r = -EFAULT;
5339 if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5340 goto out;
5341
5342 r = kvm_gmem_create(kvm, &guest_memfd);
5343 break;
5344 }
5345#endif
5346 default:
5347 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5348 }
5349out:
5350 return r;
5351}
5352
5353#ifdef CONFIG_KVM_COMPAT
5354struct compat_kvm_dirty_log {
5355 __u32 slot;
5356 __u32 padding1;
5357 union {
5358 compat_uptr_t dirty_bitmap; /* one bit per page */
5359 __u64 padding2;
5360 };
5361};
5362
5363struct compat_kvm_clear_dirty_log {
5364 __u32 slot;
5365 __u32 num_pages;
5366 __u64 first_page;
5367 union {
5368 compat_uptr_t dirty_bitmap; /* one bit per page */
5369 __u64 padding2;
5370 };
5371};
5372
5373long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5374 unsigned long arg)
5375{
5376 return -ENOTTY;
5377}
5378
5379static long kvm_vm_compat_ioctl(struct file *filp,
5380 unsigned int ioctl, unsigned long arg)
5381{
5382 struct kvm *kvm = filp->private_data;
5383 int r;
5384
5385 if (kvm->mm != current->mm || kvm->vm_dead)
5386 return -EIO;
5387
5388 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5389 if (r != -ENOTTY)
5390 return r;
5391
5392 switch (ioctl) {
5393#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5394 case KVM_CLEAR_DIRTY_LOG: {
5395 struct compat_kvm_clear_dirty_log compat_log;
5396 struct kvm_clear_dirty_log log;
5397
5398 if (copy_from_user(&compat_log, (void __user *)arg,
5399 sizeof(compat_log)))
5400 return -EFAULT;
5401 log.slot = compat_log.slot;
5402 log.num_pages = compat_log.num_pages;
5403 log.first_page = compat_log.first_page;
5404 log.padding2 = compat_log.padding2;
5405 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5406
5407 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5408 break;
5409 }
5410#endif
5411 case KVM_GET_DIRTY_LOG: {
5412 struct compat_kvm_dirty_log compat_log;
5413 struct kvm_dirty_log log;
5414
5415 if (copy_from_user(&compat_log, (void __user *)arg,
5416 sizeof(compat_log)))
5417 return -EFAULT;
5418 log.slot = compat_log.slot;
5419 log.padding1 = compat_log.padding1;
5420 log.padding2 = compat_log.padding2;
5421 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5422
5423 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5424 break;
5425 }
5426 default:
5427 r = kvm_vm_ioctl(filp, ioctl, arg);
5428 }
5429 return r;
5430}
5431#endif
5432
5433static struct file_operations kvm_vm_fops = {
5434 .release = kvm_vm_release,
5435 .unlocked_ioctl = kvm_vm_ioctl,
5436 .llseek = noop_llseek,
5437 KVM_COMPAT(kvm_vm_compat_ioctl),
5438};
5439
5440bool file_is_kvm(struct file *file)
5441{
5442 return file && file->f_op == &kvm_vm_fops;
5443}
5444EXPORT_SYMBOL_GPL(file_is_kvm);
5445
5446static int kvm_dev_ioctl_create_vm(unsigned long type)
5447{
5448 char fdname[ITOA_MAX_LEN + 1];
5449 int r, fd;
5450 struct kvm *kvm;
5451 struct file *file;
5452
5453 fd = get_unused_fd_flags(O_CLOEXEC);
5454 if (fd < 0)
5455 return fd;
5456
5457 snprintf(fdname, sizeof(fdname), "%d", fd);
5458
5459 kvm = kvm_create_vm(type, fdname);
5460 if (IS_ERR(kvm)) {
5461 r = PTR_ERR(kvm);
5462 goto put_fd;
5463 }
5464
5465 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5466 if (IS_ERR(file)) {
5467 r = PTR_ERR(file);
5468 goto put_kvm;
5469 }
5470
5471 /*
5472 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5473 * already set, with ->release() being kvm_vm_release(). In error
5474 * cases it will be called by the final fput(file) and will take
5475 * care of doing kvm_put_kvm(kvm).
5476 */
5477 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5478
5479 fd_install(fd, file);
5480 return fd;
5481
5482put_kvm:
5483 kvm_put_kvm(kvm);
5484put_fd:
5485 put_unused_fd(fd);
5486 return r;
5487}
5488
5489static long kvm_dev_ioctl(struct file *filp,
5490 unsigned int ioctl, unsigned long arg)
5491{
5492 int r = -EINVAL;
5493
5494 switch (ioctl) {
5495 case KVM_GET_API_VERSION:
5496 if (arg)
5497 goto out;
5498 r = KVM_API_VERSION;
5499 break;
5500 case KVM_CREATE_VM:
5501 r = kvm_dev_ioctl_create_vm(arg);
5502 break;
5503 case KVM_CHECK_EXTENSION:
5504 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5505 break;
5506 case KVM_GET_VCPU_MMAP_SIZE:
5507 if (arg)
5508 goto out;
5509 r = PAGE_SIZE; /* struct kvm_run */
5510#ifdef CONFIG_X86
5511 r += PAGE_SIZE; /* pio data page */
5512#endif
5513#ifdef CONFIG_KVM_MMIO
5514 r += PAGE_SIZE; /* coalesced mmio ring page */
5515#endif
5516 break;
5517 default:
5518 return kvm_arch_dev_ioctl(filp, ioctl, arg);
5519 }
5520out:
5521 return r;
5522}
5523
5524static struct file_operations kvm_chardev_ops = {
5525 .unlocked_ioctl = kvm_dev_ioctl,
5526 .llseek = noop_llseek,
5527 KVM_COMPAT(kvm_dev_ioctl),
5528};
5529
5530static struct miscdevice kvm_dev = {
5531 KVM_MINOR,
5532 "kvm",
5533 &kvm_chardev_ops,
5534};
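
/*
 * Illustrative userspace sketch (not kernel code): the lifecycle starts at
 * the misc device registered above:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		return -1;	// incompatible KVM
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	// type 0 = default VM
 */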
5535
5536#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5537__visible bool kvm_rebooting;
5538EXPORT_SYMBOL_GPL(kvm_rebooting);
5539
5540static DEFINE_PER_CPU(bool, hardware_enabled);
5541static int kvm_usage_count;
5542
5543static int __hardware_enable_nolock(void)
5544{
5545 if (__this_cpu_read(hardware_enabled))
5546 return 0;
5547
5548 if (kvm_arch_hardware_enable()) {
5549 pr_info("kvm: enabling virtualization on CPU%d failed\n",
5550 raw_smp_processor_id());
5551 return -EIO;
5552 }
5553
5554 __this_cpu_write(hardware_enabled, true);
5555 return 0;
5556}
5557
5558static void hardware_enable_nolock(void *failed)
5559{
5560 if (__hardware_enable_nolock())
5561 atomic_inc(failed);
5562}
5563
5564static int kvm_online_cpu(unsigned int cpu)
5565{
5566 int ret = 0;
5567
5568 /*
5569 * Abort the CPU online process if hardware virtualization cannot
5570 * be enabled. Otherwise running VMs would encounter unrecoverable
5571 * errors when scheduled to this CPU.
5572 */
5573 mutex_lock(&kvm_lock);
5574 if (kvm_usage_count)
5575 ret = __hardware_enable_nolock();
5576 mutex_unlock(&kvm_lock);
5577 return ret;
5578}
5579
5580static void hardware_disable_nolock(void *junk)
5581{
5582 /*
5583 * Note, hardware_disable_all_nolock() tells all online CPUs to disable
5584 * hardware, not just CPUs that successfully enabled hardware!
5585 */
5586 if (!__this_cpu_read(hardware_enabled))
5587 return;
5588
5589 kvm_arch_hardware_disable();
5590
5591 __this_cpu_write(hardware_enabled, false);
5592}
5593
5594static int kvm_offline_cpu(unsigned int cpu)
5595{
5596 mutex_lock(&kvm_lock);
5597 if (kvm_usage_count)
5598 hardware_disable_nolock(NULL);
5599 mutex_unlock(&kvm_lock);
5600 return 0;
5601}
5602
5603static void hardware_disable_all_nolock(void)
5604{
5605 BUG_ON(!kvm_usage_count);
5606
5607 kvm_usage_count--;
5608 if (!kvm_usage_count)
5609 on_each_cpu(hardware_disable_nolock, NULL, 1);
5610}
5611
5612static void hardware_disable_all(void)
5613{
5614 cpus_read_lock();
5615 mutex_lock(&kvm_lock);
5616 hardware_disable_all_nolock();
5617 mutex_unlock(&kvm_lock);
5618 cpus_read_unlock();
5619}
5620
5621static int hardware_enable_all(void)
5622{
5623 atomic_t failed = ATOMIC_INIT(0);
5624 int r;
5625
5626 /*
5627 * Do not enable hardware virtualization if the system is going down.
5628 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5629 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
5630 * after kvm_reboot() is called. Note, this relies on system_state
5631 * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
5632 * hook instead of registering a dedicated reboot notifier (the latter
5633 * runs before system_state is updated).
5634 */
5635 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5636 system_state == SYSTEM_RESTART)
5637 return -EBUSY;
5638
5639 /*
5640 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
5641 * is called, and so on_each_cpu() between them includes the CPU that
5642 * is being onlined. As a result, hardware_enable_nolock() may get
5643 * invoked before kvm_online_cpu(), which also enables hardware if the
5644 * usage count is non-zero. Disable CPU hotplug to avoid attempting to
5645 * enable hardware multiple times.
5646 */
5647 cpus_read_lock();
5648 mutex_lock(&kvm_lock);
5649
5650 r = 0;
5651
5652 kvm_usage_count++;
5653 if (kvm_usage_count == 1) {
5654 on_each_cpu(hardware_enable_nolock, &failed, 1);
5655
5656 if (atomic_read(&failed)) {
5657 hardware_disable_all_nolock();
5658 r = -EBUSY;
5659 }
5660 }
5661
5662 mutex_unlock(&kvm_lock);
5663 cpus_read_unlock();
5664
5665 return r;
5666}
5667
5668static void kvm_shutdown(void)
5669{
5670 /*
5671 * Disable hardware virtualization and set kvm_rebooting to indicate
5672 * that KVM has asynchronously disabled hardware virtualization, i.e.
5673 * that relevant errors and exceptions aren't entirely unexpected.
5674 * Some flavors of hardware virtualization need to be disabled before
5675 * transferring control to firmware (to perform shutdown/reboot), e.g.
5676 * on x86, virtualization can block INIT interrupts, which are used by
5677 * firmware to pull APs back under firmware control. Note, this path
5678 * is used for both shutdown and reboot scenarios, i.e. neither name is
5679 * 100% comprehensive.
5680 */
5681 pr_info("kvm: exiting hardware virtualization\n");
5682 kvm_rebooting = true;
5683 on_each_cpu(hardware_disable_nolock, NULL, 1);
5684}
5685
5686static int kvm_suspend(void)
5687{
5688 /*
5689 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5690 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
5691 * is stable. Assert that kvm_lock is not held to ensure the system
5692 * isn't suspended while KVM is enabling hardware. Hardware enabling
5693 * can be preempted, but the task cannot be frozen until it has dropped
5694 * all locks (userspace tasks are frozen via a fake signal).
5695 */
5696 lockdep_assert_not_held(&kvm_lock);
5697 lockdep_assert_irqs_disabled();
5698
5699 if (kvm_usage_count)
5700 hardware_disable_nolock(NULL);
5701 return 0;
5702}
5703
5704static void kvm_resume(void)
5705{
5706 lockdep_assert_not_held(&kvm_lock);
5707 lockdep_assert_irqs_disabled();
5708
5709 if (kvm_usage_count)
5710 WARN_ON_ONCE(__hardware_enable_nolock());
5711}
5712
5713static struct syscore_ops kvm_syscore_ops = {
5714 .suspend = kvm_suspend,
5715 .resume = kvm_resume,
5716 .shutdown = kvm_shutdown,
5717};
5718#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5719static int hardware_enable_all(void)
5720{
5721 return 0;
5722}
5723
5724static void hardware_disable_all(void)
5725{
5726
5727}
5728#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5729
5730static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5731{
5732 if (dev->ops->destructor)
5733 dev->ops->destructor(dev);
5734}
5735
5736static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5737{
5738 int i;
5739
5740 for (i = 0; i < bus->dev_count; i++) {
5741 struct kvm_io_device *pos = bus->range[i].dev;
5742
5743 kvm_iodevice_destructor(pos);
5744 }
5745 kfree(bus);
5746}
5747
5748static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5749 const struct kvm_io_range *r2)
5750{
5751 gpa_t addr1 = r1->addr;
5752 gpa_t addr2 = r2->addr;
5753
5754 if (addr1 < addr2)
5755 return -1;
5756
5757 /* If r2->len == 0, match the exact address. If r2->len != 0,
5758 * accept any overlapping write. Any order is acceptable for
5759 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5760 * we process all of them.
5761 */
5762 if (r2->len) {
5763 addr1 += r1->len;
5764 addr2 += r2->len;
5765 }
5766
5767 if (addr1 > addr2)
5768 return 1;
5769
5770 return 0;
5771}
5772
5773static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5774{
5775 return kvm_io_bus_cmp(p1, p2);
5776}
5777
5778static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5779 gpa_t addr, int len)
5780{
5781 struct kvm_io_range *range, key;
5782 int off;
5783
5784 key = (struct kvm_io_range) {
5785 .addr = addr,
5786 .len = len,
5787 };
5788
5789 range = bsearch(&key, bus->range, bus->dev_count,
5790 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5791 if (range == NULL)
5792 return -ENOENT;
5793
5794 off = range - bus->range;
5795
5796 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5797 off--;
5798
5799 return off;
5800}
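
/*
 * Worked example: if both a 4-byte and an 8-byte device range start at GPA
 * 0x100, they compare equal under kvm_io_bus_cmp() and may appear in either
 * order after sorting.  bsearch() can land on either entry for an access at
 * 0x100, so the walk-back loop above rewinds to the first matching range,
 * letting __kvm_io_bus_write()/__kvm_io_bus_read() visit every overlapping
 * device.
 */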
5801
5802static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5803 struct kvm_io_range *range, const void *val)
5804{
5805 int idx;
5806
5807 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5808 if (idx < 0)
5809 return -EOPNOTSUPP;
5810
5811 while (idx < bus->dev_count &&
5812 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5813 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5814 range->len, val))
5815 return idx;
5816 idx++;
5817 }
5818
5819 return -EOPNOTSUPP;
5820}
5821
5822/* kvm_io_bus_write - called under kvm->slots_lock */
5823int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5824 int len, const void *val)
5825{
5826 struct kvm_io_bus *bus;
5827 struct kvm_io_range range;
5828 int r;
5829
5830 range = (struct kvm_io_range) {
5831 .addr = addr,
5832 .len = len,
5833 };
5834
5835 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5836 if (!bus)
5837 return -ENOMEM;
5838 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5839 return r < 0 ? r : 0;
5840}
5841EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;

        /* First try the device referenced by cookie. */
        if ((cookie >= 0) && (cookie < bus->dev_count) &&
            (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
                if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
                                        val))
                        return cookie;

        /*
         * cookie contained garbage; fall back to search and return the
         * correct cookie value.
         */
        return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                             struct kvm_io_range *range, void *val)
{
        int idx;

        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
        if (idx < 0)
                return -EOPNOTSUPP;

        while (idx < bus->dev_count &&
                kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
                                       range->len, val))
                        return idx;
                idx++;
        }

        return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;
        int r;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_read(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
}

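/*
 * Register @dev on @bus for [addr, addr + len).  The bus array is kept sorted
 * by range; a new, larger copy is built and then published with
 * rcu_assign_pointer() + synchronize_srcu_expedited() so that readers always
 * see either the old bus or the fully populated new one.
 */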
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev)
{
        int i;
        struct kvm_io_bus *new_bus, *bus;
        struct kvm_io_range range;

        lockdep_assert_held(&kvm->slots_lock);

        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
                return -ENOMEM;

        /* Exclude ioeventfds, which are instead limited by the maximum fd count. */
        if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;

        new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
                          GFP_KERNEL_ACCOUNT);
        if (!new_bus)
                return -ENOMEM;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
                .dev = dev,
        };

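        /* Find the insertion point that keeps the ranges sorted. */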
        for (i = 0; i < bus->dev_count; i++)
                if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
                        break;

        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
        new_bus->dev_count++;
        new_bus->range[i] = range;
        memcpy(new_bus->range + i + 1, bus->range + i,
               (bus->dev_count - i) * sizeof(struct kvm_io_range));
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);

        return 0;
}

int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev)
{
        int i;
        struct kvm_io_bus *new_bus, *bus;

        lockdep_assert_held(&kvm->slots_lock);

        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
                return 0;

        for (i = 0; i < bus->dev_count; i++)
                if (bus->range[i].dev == dev)
                        break;

        if (i == bus->dev_count)
                return 0;

        new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
                          GFP_KERNEL_ACCOUNT);
        if (new_bus) {
                memcpy(new_bus, bus, struct_size(bus, range, i));
                new_bus->dev_count--;
                memcpy(new_bus->range + i, bus->range + i + 1,
                       flex_array_size(new_bus, range, new_bus->dev_count - i));
        }

        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);

        /*
         * If NULL bus is installed, destroy the old bus, including all the
         * attached devices. Otherwise, destroy the caller's device only.
         */
        if (!new_bus) {
                pr_err("kvm: failed to shrink bus, removing it completely\n");
                kvm_io_bus_destroy(bus);
                return -ENOMEM;
        }

        kvm_iodevice_destructor(dev);
        kfree(bus);
        return 0;
}

struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr)
{
        struct kvm_io_bus *bus;
        int dev_idx, srcu_idx;
        struct kvm_io_device *iodev = NULL;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
        if (!bus)
                goto out_unlock;

        dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
        if (dev_idx < 0)
                goto out_unlock;

        iodev = bus->range[dev_idx].dev;

out_unlock:
        srcu_read_unlock(&kvm->srcu, srcu_idx);

        return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);

static int kvm_debugfs_open(struct inode *inode, struct file *file,
                            int (*get)(void *, u64 *), int (*set)(void *, u64),
                            const char *fmt)
{
        int ret;
        struct kvm_stat_data *stat_data = inode->i_private;

        /*
         * The debugfs files hold a reference to the kvm struct so that it
         * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe()
         * avoids the race between open and the removal of the debugfs
         * directory.
         */
        if (!kvm_get_kvm_safe(stat_data->kvm))
                return -ENOENT;

        ret = simple_attr_open(inode, file, get,
                               kvm_stats_debugfs_mode(stat_data->desc) & 0222
                               ? set : NULL, fmt);
        if (ret)
                kvm_put_kvm(stat_data->kvm);

        return ret;
}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
        struct kvm_stat_data *stat_data = inode->i_private;

        simple_attr_release(inode, file);
        kvm_put_kvm(stat_data->kvm);

        return 0;
}

static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{
        *val = *(u64 *)((void *)(&kvm->stat) + offset);

        return 0;
}

static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{
        *(u64 *)((void *)(&kvm->stat) + offset) = 0;

        return 0;
}

static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        *val = 0;

        kvm_for_each_vcpu(i, vcpu, kvm)
                *val += *(u64 *)((void *)(&vcpu->stat) + offset);

        return 0;
}

static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                *(u64 *)((void *)(&vcpu->stat) + offset) = 0;

        return 0;
}

static int kvm_stat_data_get(void *data, u64 *val)
{
        int r = -EFAULT;
        struct kvm_stat_data *stat_data = data;

        switch (stat_data->kind) {
        case KVM_STAT_VM:
                r = kvm_get_stat_per_vm(stat_data->kvm,
                                        stat_data->desc->desc.offset, val);
                break;
        case KVM_STAT_VCPU:
                r = kvm_get_stat_per_vcpu(stat_data->kvm,
                                          stat_data->desc->desc.offset, val);
                break;
        }

        return r;
}

static int kvm_stat_data_clear(void *data, u64 val)
{
        int r = -EFAULT;
        struct kvm_stat_data *stat_data = data;

        if (val)
                return -EINVAL;

        switch (stat_data->kind) {
        case KVM_STAT_VM:
                r = kvm_clear_stat_per_vm(stat_data->kvm,
                                          stat_data->desc->desc.offset);
                break;
        case KVM_STAT_VCPU:
                r = kvm_clear_stat_per_vcpu(stat_data->kvm,
                                            stat_data->desc->desc.offset);
                break;
        }

        return r;
}

static int kvm_stat_data_open(struct inode *inode, struct file *file)
{
        __simple_attr_check_format("%llu\n", 0ull);
        return kvm_debugfs_open(inode, file, kvm_stat_data_get,
                                kvm_stat_data_clear, "%llu\n");
}

static const struct file_operations stat_fops_per_vm = {
        .owner = THIS_MODULE,
        .open = kvm_stat_data_open,
        .release = kvm_debugfs_release,
        .read = simple_attr_read,
        .write = simple_attr_write,
        .llseek = no_llseek,
};

static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        u64 tmp_val;

        *val = 0;
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_get_stat_per_vm(kvm, offset, &tmp_val);
                *val += tmp_val;
        }
        mutex_unlock(&kvm_lock);
        return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        if (val)
                return -EINVAL;

        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_clear_stat_per_vm(kvm, offset);
        mutex_unlock(&kvm_lock);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        u64 tmp_val;

        *val = 0;
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
                *val += tmp_val;
        }
        mutex_unlock(&kvm_lock);
        return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        if (val)
                return -EINVAL;

        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_clear_stat_per_vcpu(kvm, offset);
        mutex_unlock(&kvm_lock);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
                        "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");

static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
        struct kobj_uevent_env *env;
        unsigned long long created, active;

        if (!kvm_dev.this_device || !kvm)
                return;

        mutex_lock(&kvm_lock);
        if (type == KVM_EVENT_CREATE_VM) {
                kvm_createvm_count++;
                kvm_active_vms++;
        } else if (type == KVM_EVENT_DESTROY_VM) {
                kvm_active_vms--;
        }
        created = kvm_createvm_count;
        active = kvm_active_vms;
        mutex_unlock(&kvm_lock);

        env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
        if (!env)
                return;

        add_uevent_var(env, "CREATED=%llu", created);
        add_uevent_var(env, "COUNT=%llu", active);

        if (type == KVM_EVENT_CREATE_VM) {
                add_uevent_var(env, "EVENT=create");
                kvm->userspace_pid = task_pid_nr(current);
        } else if (type == KVM_EVENT_DESTROY_VM) {
                add_uevent_var(env, "EVENT=destroy");
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);

        if (!IS_ERR(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);

                if (p) {
                        tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
                        if (!IS_ERR(tmp))
                                add_uevent_var(env, "STATS_PATH=%s", tmp);
                        kfree(p);
                }
        }
        /* No need for checks, since we add at most 5 keys. */
        env->envp[env->envp_idx++] = NULL;
        kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
        kfree(env);
}

static void kvm_init_debug(void)
{
        const struct file_operations *fops;
        const struct _kvm_stats_desc *pdesc;
        int i;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);

        for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
                pdesc = &kvm_vm_stats_desc[i];
                if (kvm_stats_debugfs_mode(pdesc) & 0222)
                        fops = &vm_stat_fops;
                else
                        fops = &vm_stat_readonly_fops;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm_debugfs_dir,
                                    (void *)(long)pdesc->desc.offset, fops);
        }

        for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
                pdesc = &kvm_vcpu_stats_desc[i];
                if (kvm_stats_debugfs_mode(pdesc) & 0222)
                        fops = &vcpu_stat_fops;
                else
                        fops = &vcpu_stat_readonly_fops;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm_debugfs_dir,
                                    (void *)(long)pdesc->desc.offset, fops);
        }
}
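
/*
 * Resulting layout (illustrative; assumes debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *	/sys/kernel/debug/kvm/<stat>		aggregate across all VMs
 *	/sys/kernel/debug/kvm/<pid>-<fd>/<stat>	per-VM copies, created by
 *						kvm_create_vm_debugfs()
 *
 * Writing "0" to a writable stat clears it; any other value is rejected with
 * -EINVAL (see vm_stat_clear() and kvm_stat_data_clear()).
 */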

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        WRITE_ONCE(vcpu->preempted, false);
        WRITE_ONCE(vcpu->ready, false);

        __this_cpu_write(kvm_running_vcpu, vcpu);
        kvm_arch_sched_in(vcpu, cpu);
        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

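        /*
         * A task that is still on the runqueue was involuntarily preempted,
         * as opposed to voluntarily blocking or yielding the CPU.
         */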
        if (current->on_rq) {
                WRITE_ONCE(vcpu->preempted, true);
                WRITE_ONCE(vcpu->ready, true);
        }
        kvm_arch_vcpu_put(vcpu);
        __this_cpu_write(kvm_running_vcpu, NULL);
}

/**
 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value as we update the
 * per-CPU variable in the preempt notifier handlers.
 */
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = __this_cpu_read(kvm_running_vcpu);
        preempt_enable();

        return vcpu;
}
EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
        return &kvm_running_vcpu;
}

#ifdef CONFIG_GUEST_PERF_EVENTS
static unsigned int kvm_guest_state(void)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        unsigned int state;

        if (!kvm_arch_pmi_in_guest(vcpu))
                return 0;

        state = PERF_GUEST_ACTIVE;
        if (!kvm_arch_vcpu_in_kernel(vcpu))
                state |= PERF_GUEST_USER;

        return state;
}

static unsigned long kvm_guest_get_ip(void)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
        if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
                return 0;

        return kvm_arch_vcpu_get_ip(vcpu);
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
        .state = kvm_guest_state,
        .get_ip = kvm_guest_get_ip,
        .handle_intel_pt_intr = NULL,
};

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
{
        kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
        perf_register_guest_info_callbacks(&kvm_guest_cbs);
}

void kvm_unregister_perf_callbacks(void)
{
        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
}
#endif

int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
        int r;
        int cpu;

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
                                      kvm_online_cpu, kvm_offline_cpu);
        if (r)
                return r;

        register_syscore_ops(&kvm_syscore_ops);
#endif

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        if (!vcpu_align)
                vcpu_align = __alignof__(struct kvm_vcpu);
        kvm_vcpu_cache =
                kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
                                           SLAB_ACCOUNT,
                                           offsetof(struct kvm_vcpu, arch),
                                           offsetofend(struct kvm_vcpu, stats_id)
                                           - offsetof(struct kvm_vcpu, arch),
                                           NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto err_vcpu_cache;
        }

        for_each_possible_cpu(cpu) {
                if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
                                            GFP_KERNEL, cpu_to_node(cpu))) {
                        r = -ENOMEM;
                        goto err_cpu_kick_mask;
                }
        }

        r = kvm_irqfd_init();
        if (r)
                goto err_irqfd;

        r = kvm_async_pf_init();
        if (r)
                goto err_async_pf;

        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;
        kvm_device_fops.owner = module;

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_init_debug();

        r = kvm_vfio_ops_init();
        if (WARN_ON_ONCE(r))
                goto err_vfio;

        kvm_gmem_init(module);

        /*
         * Registration _must_ be the very last thing done, as this exposes
         * /dev/kvm to userspace, i.e. all infrastructure must be setup!
         */
        r = misc_register(&kvm_dev);
        if (r) {
                pr_err("kvm: misc device register failed\n");
                goto err_register;
        }

        return 0;

err_register:
        kvm_vfio_ops_exit();
err_vfio:
        kvm_async_pf_deinit();
err_async_pf:
        kvm_irqfd_exit();
err_irqfd:
err_cpu_kick_mask:
        for_each_possible_cpu(cpu)
                free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
        kmem_cache_destroy(kvm_vcpu_cache);
err_vcpu_cache:
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
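
/*
 * Usage sketch (illustrative): an architecture module calls kvm_init() from
 * its module_init() path once arch-specific setup has succeeded, passing the
 * size and alignment of its vCPU container so the kmem cache can allocate
 * them, e.g.:
 *
 *	r = kvm_init(sizeof(struct my_arch_vcpu),
 *		     __alignof__(struct my_arch_vcpu), THIS_MODULE);
 *
 * struct my_arch_vcpu is a hypothetical arch wrapper that embeds
 * struct kvm_vcpu.
 */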

void kvm_exit(void)
{
        int cpu;

        /*
         * Note, unregistering /dev/kvm doesn't strictly need to come first;
         * fops_get(), a.k.a. try_module_get(), prevents acquiring references
         * to KVM while the module is being stopped.
         */
        misc_deregister(&kvm_dev);

        debugfs_remove_recursive(kvm_debugfs_dir);
        for_each_possible_cpu(cpu)
                free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
        kmem_cache_destroy(kvm_vcpu_cache);
        kvm_vfio_ops_exit();
        kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
        kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);

struct kvm_vm_worker_thread_context {
        struct kvm *kvm;
        struct task_struct *parent;
        struct completion init_done;
        kvm_vm_thread_fn_t thread_fn;
        uintptr_t data;
        int err;
};

static int kvm_vm_worker_thread(void *context)
{
        /*
         * The init_context is allocated on the stack of the parent thread, so
         * we have to locally copy anything that is needed beyond
         * initialization.
         */
        struct kvm_vm_worker_thread_context *init_context = context;
        struct task_struct *parent;
        struct kvm *kvm = init_context->kvm;
        kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
        uintptr_t data = init_context->data;
        int err;

        err = kthread_park(current);
        /* kthread_park(current) is never supposed to return an error. */
        WARN_ON(err != 0);
        if (err)
                goto init_complete;

        err = cgroup_attach_task_all(init_context->parent, current);
        if (err) {
                kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
                        __func__, err);
                goto init_complete;
        }

        set_user_nice(current, task_nice(init_context->parent));

init_complete:
        init_context->err = err;
        complete(&init_context->init_done);
        init_context = NULL;

        if (err)
                goto out;

        /* Wait to be woken up by the spawner before proceeding. */
        kthread_parkme();

        if (!kthread_should_stop())
                err = thread_fn(kvm, data);

out:
        /*
         * Move the kthread back to its original cgroup to prevent it from
         * lingering in the cgroup of the VM process after the latter finishes
         * its execution.
         *
         * kthread_stop() waits on the 'exited' completion condition which is
         * set in exit_mm(), via mm_release(), in do_exit(). However, the
         * kthread is removed from the cgroup in cgroup_exit(), which is
         * called after exit_mm(). This causes kthread_stop() to return
         * before the kthread actually quits the cgroup.
         */
        rcu_read_lock();
        parent = rcu_dereference(current->real_parent);
        get_task_struct(parent);
        rcu_read_unlock();
        cgroup_attach_task_all(parent, current);
        put_task_struct(parent);

        return err;
}

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr)
{
        struct kvm_vm_worker_thread_context init_context = {};
        struct task_struct *thread;

        *thread_ptr = NULL;
        init_context.kvm = kvm;
        init_context.parent = current;
        init_context.thread_fn = thread_fn;
        init_context.data = data;
        init_completion(&init_context.init_done);

        thread = kthread_run(kvm_vm_worker_thread, &init_context,
                             "%s-%d", name, task_pid_nr(current));
        if (IS_ERR(thread))
                return PTR_ERR(thread);

        /* kthread_run() is never supposed to return NULL. */
        WARN_ON(thread == NULL);

        wait_for_completion(&init_context.init_done);

        if (!init_context.err)
                *thread_ptr = thread;

        return init_context.err;
}
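
/*
 * Usage sketch (illustrative): the created thread starts parked, so a caller
 * typically unparks it once creation succeeds:
 *
 *	struct task_struct *thread;
 *	int r;
 *
 *	r = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0,
 *					"kvm-my-worker", &thread);
 *	if (!r)
 *		kthread_unpark(thread);
 *
 * my_worker_fn is a hypothetical kvm_vm_thread_fn_t; the worker runs in the
 * VM process's cgroups and inherits its nice level (see above).
 */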
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 */
15
16#include <kvm/iodev.h>
17
18#include <linux/kvm_host.h>
19#include <linux/kvm.h>
20#include <linux/module.h>
21#include <linux/errno.h>
22#include <linux/percpu.h>
23#include <linux/mm.h>
24#include <linux/miscdevice.h>
25#include <linux/vmalloc.h>
26#include <linux/reboot.h>
27#include <linux/debugfs.h>
28#include <linux/highmem.h>
29#include <linux/file.h>
30#include <linux/syscore_ops.h>
31#include <linux/cpu.h>
32#include <linux/sched/signal.h>
33#include <linux/sched/mm.h>
34#include <linux/sched/stat.h>
35#include <linux/cpumask.h>
36#include <linux/smp.h>
37#include <linux/anon_inodes.h>
38#include <linux/profile.h>
39#include <linux/kvm_para.h>
40#include <linux/pagemap.h>
41#include <linux/mman.h>
42#include <linux/swap.h>
43#include <linux/bitops.h>
44#include <linux/spinlock.h>
45#include <linux/compat.h>
46#include <linux/srcu.h>
47#include <linux/hugetlb.h>
48#include <linux/slab.h>
49#include <linux/sort.h>
50#include <linux/bsearch.h>
51#include <linux/io.h>
52#include <linux/lockdep.h>
53#include <linux/kthread.h>
54#include <linux/suspend.h>
55
56#include <asm/processor.h>
57#include <asm/ioctl.h>
58#include <linux/uaccess.h>
59
60#include "coalesced_mmio.h"
61#include "async_pf.h"
62#include "kvm_mm.h"
63#include "vfio.h"
64
65#include <trace/events/ipi.h>
66
67#define CREATE_TRACE_POINTS
68#include <trace/events/kvm.h>
69
70#include <linux/kvm_dirty_ring.h>
71
72
73/* Worst case buffer size needed for holding an integer. */
74#define ITOA_MAX_LEN 12
75
76MODULE_AUTHOR("Qumranet");
77MODULE_LICENSE("GPL");
78
79/* Architectures should define their poll value according to the halt latency */
80unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
81module_param(halt_poll_ns, uint, 0644);
82EXPORT_SYMBOL_GPL(halt_poll_ns);
83
84/* Default doubles per-vcpu halt_poll_ns. */
85unsigned int halt_poll_ns_grow = 2;
86module_param(halt_poll_ns_grow, uint, 0644);
87EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
88
89/* The start value to grow halt_poll_ns from */
90unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
91module_param(halt_poll_ns_grow_start, uint, 0644);
92EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
93
94/* Default resets per-vcpu halt_poll_ns . */
95unsigned int halt_poll_ns_shrink;
96module_param(halt_poll_ns_shrink, uint, 0644);
97EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
98
99/*
100 * Ordering of locks:
101 *
102 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
103 */
104
105DEFINE_MUTEX(kvm_lock);
106LIST_HEAD(vm_list);
107
108static struct kmem_cache *kvm_vcpu_cache;
109
110static __read_mostly struct preempt_ops kvm_preempt_ops;
111static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
112
113struct dentry *kvm_debugfs_dir;
114EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
115
116static const struct file_operations stat_fops_per_vm;
117
118static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
119 unsigned long arg);
120#ifdef CONFIG_KVM_COMPAT
121static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
122 unsigned long arg);
123#define KVM_COMPAT(c) .compat_ioctl = (c)
124#else
125/*
126 * For architectures that don't implement a compat infrastructure,
127 * adopt a double line of defense:
128 * - Prevent a compat task from opening /dev/kvm
129 * - If the open has been done by a 64bit task, and the KVM fd
130 * passed to a compat task, let the ioctls fail.
131 */
132static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
133 unsigned long arg) { return -EINVAL; }
134
135static int kvm_no_compat_open(struct inode *inode, struct file *file)
136{
137 return is_compat_task() ? -ENODEV : 0;
138}
139#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
140 .open = kvm_no_compat_open
141#endif
142static int hardware_enable_all(void);
143static void hardware_disable_all(void);
144
145static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
146
147#define KVM_EVENT_CREATE_VM 0
148#define KVM_EVENT_DESTROY_VM 1
149static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
150static unsigned long long kvm_createvm_count;
151static unsigned long long kvm_active_vms;
152
153static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
154
155__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
156{
157}
158
159bool kvm_is_zone_device_page(struct page *page)
160{
161 /*
162 * The metadata used by is_zone_device_page() to determine whether or
163 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
164 * the device has been pinned, e.g. by get_user_pages(). WARN if the
165 * page_count() is zero to help detect bad usage of this helper.
166 */
167 if (WARN_ON_ONCE(!page_count(page)))
168 return false;
169
170 return is_zone_device_page(page);
171}
172
173/*
174 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
175 * page, NULL otherwise. Note, the list of refcounted PG_reserved page types
176 * is likely incomplete, it has been compiled purely through people wanting to
177 * back guest with a certain type of memory and encountering issues.
178 */
179struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
180{
181 struct page *page;
182
183 if (!pfn_valid(pfn))
184 return NULL;
185
186 page = pfn_to_page(pfn);
187 if (!PageReserved(page))
188 return page;
189
190 /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
191 if (is_zero_pfn(pfn))
192 return page;
193
194 /*
195 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
196 * perspective they are "normal" pages, albeit with slightly different
197 * usage rules.
198 */
199 if (kvm_is_zone_device_page(page))
200 return page;
201
202 return NULL;
203}
204
205/*
206 * Switches to specified vcpu, until a matching vcpu_put()
207 */
208void vcpu_load(struct kvm_vcpu *vcpu)
209{
210 int cpu = get_cpu();
211
212 __this_cpu_write(kvm_running_vcpu, vcpu);
213 preempt_notifier_register(&vcpu->preempt_notifier);
214 kvm_arch_vcpu_load(vcpu, cpu);
215 put_cpu();
216}
217EXPORT_SYMBOL_GPL(vcpu_load);
218
219void vcpu_put(struct kvm_vcpu *vcpu)
220{
221 preempt_disable();
222 kvm_arch_vcpu_put(vcpu);
223 preempt_notifier_unregister(&vcpu->preempt_notifier);
224 __this_cpu_write(kvm_running_vcpu, NULL);
225 preempt_enable();
226}
227EXPORT_SYMBOL_GPL(vcpu_put);
228
229/* TODO: merge with kvm_arch_vcpu_should_kick */
230static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
231{
232 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
233
234 /*
235 * We need to wait for the VCPU to reenable interrupts and get out of
236 * READING_SHADOW_PAGE_TABLES mode.
237 */
238 if (req & KVM_REQUEST_WAIT)
239 return mode != OUTSIDE_GUEST_MODE;
240
241 /*
242 * Need to kick a running VCPU, but otherwise there is nothing to do.
243 */
244 return mode == IN_GUEST_MODE;
245}
246
247static void ack_kick(void *_completed)
248{
249}
250
251static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
252{
253 if (cpumask_empty(cpus))
254 return false;
255
256 smp_call_function_many(cpus, ack_kick, NULL, wait);
257 return true;
258}
259
260static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
261 struct cpumask *tmp, int current_cpu)
262{
263 int cpu;
264
265 if (likely(!(req & KVM_REQUEST_NO_ACTION)))
266 __kvm_make_request(req, vcpu);
267
268 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
269 return;
270
271 /*
272 * Note, the vCPU could get migrated to a different pCPU at any point
273 * after kvm_request_needs_ipi(), which could result in sending an IPI
274 * to the previous pCPU. But, that's OK because the purpose of the IPI
275 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
276 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
277 * after this point is also OK, as the requirement is only that KVM wait
278 * for vCPUs that were reading SPTEs _before_ any changes were
279 * finalized. See kvm_vcpu_kick() for more details on handling requests.
280 */
281 if (kvm_request_needs_ipi(vcpu, req)) {
282 cpu = READ_ONCE(vcpu->cpu);
283 if (cpu != -1 && cpu != current_cpu)
284 __cpumask_set_cpu(cpu, tmp);
285 }
286}
287
288bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
289 unsigned long *vcpu_bitmap)
290{
291 struct kvm_vcpu *vcpu;
292 struct cpumask *cpus;
293 int i, me;
294 bool called;
295
296 me = get_cpu();
297
298 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
299 cpumask_clear(cpus);
300
301 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
302 vcpu = kvm_get_vcpu(kvm, i);
303 if (!vcpu)
304 continue;
305 kvm_make_vcpu_request(vcpu, req, cpus, me);
306 }
307
308 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
309 put_cpu();
310
311 return called;
312}
313
314bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
315 struct kvm_vcpu *except)
316{
317 struct kvm_vcpu *vcpu;
318 struct cpumask *cpus;
319 unsigned long i;
320 bool called;
321 int me;
322
323 me = get_cpu();
324
325 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
326 cpumask_clear(cpus);
327
328 kvm_for_each_vcpu(i, vcpu, kvm) {
329 if (vcpu == except)
330 continue;
331 kvm_make_vcpu_request(vcpu, req, cpus, me);
332 }
333
334 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
335 put_cpu();
336
337 return called;
338}
339
340bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
341{
342 return kvm_make_all_cpus_request_except(kvm, req, NULL);
343}
344EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
345
346void kvm_flush_remote_tlbs(struct kvm *kvm)
347{
348 ++kvm->stat.generic.remote_tlb_flush_requests;
349
350 /*
351 * We want to publish modifications to the page tables before reading
352 * mode. Pairs with a memory barrier in arch-specific code.
353 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
354 * and smp_mb in walk_shadow_page_lockless_begin/end.
355 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
356 *
357 * There is already an smp_mb__after_atomic() before
358 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
359 * barrier here.
360 */
361 if (!kvm_arch_flush_remote_tlbs(kvm)
362 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
363 ++kvm->stat.generic.remote_tlb_flush;
364}
365EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
366
367void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
368{
369 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
370 return;
371
372 /*
373 * Fall back to a flushing entire TLBs if the architecture range-based
374 * TLB invalidation is unsupported or can't be performed for whatever
375 * reason.
376 */
377 kvm_flush_remote_tlbs(kvm);
378}
379
380void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
381 const struct kvm_memory_slot *memslot)
382{
383 /*
384 * All current use cases for flushing the TLBs for a specific memslot
385 * are related to dirty logging, and many do the TLB flush out of
386 * mmu_lock. The interaction between the various operations on memslot
387 * must be serialized by slots_locks to ensure the TLB flush from one
388 * operation is observed by any other operation on the same memslot.
389 */
390 lockdep_assert_held(&kvm->slots_lock);
391 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
392}
393
394static void kvm_flush_shadow_all(struct kvm *kvm)
395{
396 kvm_arch_flush_shadow_all(kvm);
397 kvm_arch_guest_memory_reclaimed(kvm);
398}
399
400#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
401static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
402 gfp_t gfp_flags)
403{
404 gfp_flags |= mc->gfp_zero;
405
406 if (mc->kmem_cache)
407 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
408 else
409 return (void *)__get_free_page(gfp_flags);
410}
411
412int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
413{
414 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
415 void *obj;
416
417 if (mc->nobjs >= min)
418 return 0;
419
420 if (unlikely(!mc->objects)) {
421 if (WARN_ON_ONCE(!capacity))
422 return -EIO;
423
424 mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
425 if (!mc->objects)
426 return -ENOMEM;
427
428 mc->capacity = capacity;
429 }
430
431 /* It is illegal to request a different capacity across topups. */
432 if (WARN_ON_ONCE(mc->capacity != capacity))
433 return -EIO;
434
435 while (mc->nobjs < mc->capacity) {
436 obj = mmu_memory_cache_alloc_obj(mc, gfp);
437 if (!obj)
438 return mc->nobjs >= min ? 0 : -ENOMEM;
439 mc->objects[mc->nobjs++] = obj;
440 }
441 return 0;
442}
443
444int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
445{
446 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
447}
448
449int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
450{
451 return mc->nobjs;
452}
453
454void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
455{
456 while (mc->nobjs) {
457 if (mc->kmem_cache)
458 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
459 else
460 free_page((unsigned long)mc->objects[--mc->nobjs]);
461 }
462
463 kvfree(mc->objects);
464
465 mc->objects = NULL;
466 mc->capacity = 0;
467}
468
469void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
470{
471 void *p;
472
473 if (WARN_ON(!mc->nobjs))
474 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
475 else
476 p = mc->objects[--mc->nobjs];
477 BUG_ON(!p);
478 return p;
479}
480#endif
481
482static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
483{
484 mutex_init(&vcpu->mutex);
485 vcpu->cpu = -1;
486 vcpu->kvm = kvm;
487 vcpu->vcpu_id = id;
488 vcpu->pid = NULL;
489#ifndef __KVM_HAVE_ARCH_WQP
490 rcuwait_init(&vcpu->wait);
491#endif
492 kvm_async_pf_vcpu_init(vcpu);
493
494 kvm_vcpu_set_in_spin_loop(vcpu, false);
495 kvm_vcpu_set_dy_eligible(vcpu, false);
496 vcpu->preempted = false;
497 vcpu->ready = false;
498 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
499 vcpu->last_used_slot = NULL;
500
501 /* Fill the stats id string for the vcpu */
502 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
503 task_pid_nr(current), id);
504}
505
506static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
507{
508 kvm_arch_vcpu_destroy(vcpu);
509 kvm_dirty_ring_free(&vcpu->dirty_ring);
510
511 /*
512 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
513 * the vcpu->pid pointer, and at destruction time all file descriptors
514 * are already gone.
515 */
516 put_pid(rcu_dereference_protected(vcpu->pid, 1));
517
518 free_page((unsigned long)vcpu->run);
519 kmem_cache_free(kvm_vcpu_cache, vcpu);
520}
521
522void kvm_destroy_vcpus(struct kvm *kvm)
523{
524 unsigned long i;
525 struct kvm_vcpu *vcpu;
526
527 kvm_for_each_vcpu(i, vcpu, kvm) {
528 kvm_vcpu_destroy(vcpu);
529 xa_erase(&kvm->vcpu_array, i);
530 }
531
532 atomic_set(&kvm->online_vcpus, 0);
533}
534EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
535
536#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
537static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
538{
539 return container_of(mn, struct kvm, mmu_notifier);
540}
541
542typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
543
544typedef void (*on_lock_fn_t)(struct kvm *kvm);
545
546struct kvm_mmu_notifier_range {
547 /*
548 * 64-bit addresses, as KVM notifiers can operate on host virtual
549 * addresses (unsigned long) and guest physical addresses (64-bit).
550 */
551 u64 start;
552 u64 end;
553 union kvm_mmu_notifier_arg arg;
554 gfn_handler_t handler;
555 on_lock_fn_t on_lock;
556 bool flush_on_ret;
557 bool may_block;
558};
559
560/*
561 * The inner-most helper returns a tuple containing the return value from the
562 * arch- and action-specific handler, plus a flag indicating whether or not at
563 * least one memslot was found, i.e. if the handler found guest memory.
564 *
565 * Note, most notifiers are averse to booleans, so even though KVM tracks the
566 * return from arch code as a bool, outer helpers will cast it to an int. :-(
567 */
568typedef struct kvm_mmu_notifier_return {
569 bool ret;
570 bool found_memslot;
571} kvm_mn_ret_t;
572
573/*
574 * Use a dedicated stub instead of NULL to indicate that there is no callback
575 * function/handler. The compiler technically can't guarantee that a real
576 * function will have a non-zero address, and so it will generate code to
577 * check for !NULL, whereas comparing against a stub will be elided at compile
578 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
579 */
580static void kvm_null_fn(void)
581{
582
583}
584#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
585
586static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
587
588/* Iterate over each memslot intersecting [start, last] (inclusive) range */
589#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
590 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
591 node; \
592 node = interval_tree_iter_next(node, start, last)) \
593
594static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
595 const struct kvm_mmu_notifier_range *range)
596{
597 struct kvm_mmu_notifier_return r = {
598 .ret = false,
599 .found_memslot = false,
600 };
601 struct kvm_gfn_range gfn_range;
602 struct kvm_memory_slot *slot;
603 struct kvm_memslots *slots;
604 int i, idx;
605
606 if (WARN_ON_ONCE(range->end <= range->start))
607 return r;
608
609 /* A null handler is allowed if and only if on_lock() is provided. */
610 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
611 IS_KVM_NULL_FN(range->handler)))
612 return r;
613
614 idx = srcu_read_lock(&kvm->srcu);
615
616 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
617 struct interval_tree_node *node;
618
619 slots = __kvm_memslots(kvm, i);
620 kvm_for_each_memslot_in_hva_range(node, slots,
621 range->start, range->end - 1) {
622 unsigned long hva_start, hva_end;
623
624 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
625 hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
626 hva_end = min_t(unsigned long, range->end,
627 slot->userspace_addr + (slot->npages << PAGE_SHIFT));
628
629 /*
630 * To optimize for the likely case where the address
631 * range is covered by zero or one memslots, don't
632 * bother making these conditional (to avoid writes on
633 * the second or later invocation of the handler).
634 */
635 gfn_range.arg = range->arg;
636 gfn_range.may_block = range->may_block;
637
638 /*
639 * {gfn(page) | page intersects with [hva_start, hva_end)} =
640 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
641 */
642 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
643 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
644 gfn_range.slot = slot;
645
646 if (!r.found_memslot) {
647 r.found_memslot = true;
648 KVM_MMU_LOCK(kvm);
649 if (!IS_KVM_NULL_FN(range->on_lock))
650 range->on_lock(kvm);
651
652 if (IS_KVM_NULL_FN(range->handler))
653 break;
654 }
655 r.ret |= range->handler(kvm, &gfn_range);
656 }
657 }
658
659 if (range->flush_on_ret && r.ret)
660 kvm_flush_remote_tlbs(kvm);
661
662 if (r.found_memslot)
663 KVM_MMU_UNLOCK(kvm);
664
665 srcu_read_unlock(&kvm->srcu, idx);
666
667 return r;
668}
669
670static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
671 unsigned long start,
672 unsigned long end,
673 union kvm_mmu_notifier_arg arg,
674 gfn_handler_t handler)
675{
676 struct kvm *kvm = mmu_notifier_to_kvm(mn);
677 const struct kvm_mmu_notifier_range range = {
678 .start = start,
679 .end = end,
680 .arg = arg,
681 .handler = handler,
682 .on_lock = (void *)kvm_null_fn,
683 .flush_on_ret = true,
684 .may_block = false,
685 };
686
687 return __kvm_handle_hva_range(kvm, &range).ret;
688}
689
690static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
691 unsigned long start,
692 unsigned long end,
693 gfn_handler_t handler)
694{
695 struct kvm *kvm = mmu_notifier_to_kvm(mn);
696 const struct kvm_mmu_notifier_range range = {
697 .start = start,
698 .end = end,
699 .handler = handler,
700 .on_lock = (void *)kvm_null_fn,
701 .flush_on_ret = false,
702 .may_block = false,
703 };
704
705 return __kvm_handle_hva_range(kvm, &range).ret;
706}
707
708static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
709{
710 /*
711 * Skipping invalid memslots is correct if and only change_pte() is
712 * surrounded by invalidate_range_{start,end}(), which is currently
713 * guaranteed by the primary MMU. If that ever changes, KVM needs to
714 * unmap the memslot instead of skipping the memslot to ensure that KVM
715 * doesn't hold references to the old PFN.
716 */
717 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
718
719 if (range->slot->flags & KVM_MEMSLOT_INVALID)
720 return false;
721
722 return kvm_set_spte_gfn(kvm, range);
723}
724
725static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
726 struct mm_struct *mm,
727 unsigned long address,
728 pte_t pte)
729{
730 struct kvm *kvm = mmu_notifier_to_kvm(mn);
731 const union kvm_mmu_notifier_arg arg = { .pte = pte };
732
733 trace_kvm_set_spte_hva(address);
734
735 /*
736 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
737 * If mmu_invalidate_in_progress is zero, then no in-progress
738 * invalidations, including this one, found a relevant memslot at
739 * start(); rechecking memslots here is unnecessary. Note, a false
740 * positive (count elevated by a different invalidation) is sub-optimal
741 * but functionally ok.
742 */
743 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
744 if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
745 return;
746
747 kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
748}
749
750void kvm_mmu_invalidate_begin(struct kvm *kvm)
751{
752 lockdep_assert_held_write(&kvm->mmu_lock);
753 /*
754 * The count increase must become visible at unlock time as no
755 * spte can be established without taking the mmu_lock and
756 * count is also read inside the mmu_lock critical section.
757 */
758 kvm->mmu_invalidate_in_progress++;
759
760 if (likely(kvm->mmu_invalidate_in_progress == 1)) {
761 kvm->mmu_invalidate_range_start = INVALID_GPA;
762 kvm->mmu_invalidate_range_end = INVALID_GPA;
763 }
764}
765
766void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
767{
768 lockdep_assert_held_write(&kvm->mmu_lock);
769
770 WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
771
772 if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
773 kvm->mmu_invalidate_range_start = start;
774 kvm->mmu_invalidate_range_end = end;
775 } else {
776 /*
777 * Fully tracking multiple concurrent ranges has diminishing
778 * returns. Keep things simple and just find the minimal range
779 * which includes the current and new ranges. As there won't be
780 * enough information to subtract a range after its invalidate
781 * completes, any ranges invalidated concurrently will
782 * accumulate and persist until all outstanding invalidates
783 * complete.
784 */
785 kvm->mmu_invalidate_range_start =
786 min(kvm->mmu_invalidate_range_start, start);
787 kvm->mmu_invalidate_range_end =
788 max(kvm->mmu_invalidate_range_end, end);
789 }
790}
791
792bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
793{
794 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
795 return kvm_unmap_gfn_range(kvm, range);
796}
797
798static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
799 const struct mmu_notifier_range *range)
800{
801 struct kvm *kvm = mmu_notifier_to_kvm(mn);
802 const struct kvm_mmu_notifier_range hva_range = {
803 .start = range->start,
804 .end = range->end,
805 .handler = kvm_mmu_unmap_gfn_range,
806 .on_lock = kvm_mmu_invalidate_begin,
807 .flush_on_ret = true,
808 .may_block = mmu_notifier_range_blockable(range),
809 };
810
811 trace_kvm_unmap_hva_range(range->start, range->end);
812
813 /*
814 * Prevent memslot modification between range_start() and range_end()
815 * so that conditionally locking provides the same result in both
816 * functions. Without that guarantee, the mmu_invalidate_in_progress
817 * adjustments will be imbalanced.
818 *
819 * Pairs with the decrement in range_end().
820 */
821 spin_lock(&kvm->mn_invalidate_lock);
822 kvm->mn_active_invalidate_count++;
823 spin_unlock(&kvm->mn_invalidate_lock);
824
825 /*
826 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
827 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
828 * each cache's lock. There are relatively few caches in existence at
829 * any given time, and the caches themselves can check for hva overlap,
830 * i.e. don't need to rely on memslot overlap checks for performance.
831 * Because this runs without holding mmu_lock, the pfn caches must use
832 * mn_active_invalidate_count (see above) instead of
833 * mmu_invalidate_in_progress.
834 */
835 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
836 hva_range.may_block);
837
838 /*
839 * If one or more memslots were found and thus zapped, notify arch code
840 * that guest memory has been reclaimed. This needs to be done *after*
841 * dropping mmu_lock, as x86's reclaim path is slooooow.
842 */
843 if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
844 kvm_arch_guest_memory_reclaimed(kvm);
845
846 return 0;
847}
848
849void kvm_mmu_invalidate_end(struct kvm *kvm)
850{
851 lockdep_assert_held_write(&kvm->mmu_lock);
852
853 /*
854 * This sequence increase will notify the kvm page fault that
855 * the page that is going to be mapped in the spte could have
856 * been freed.
857 */
858 kvm->mmu_invalidate_seq++;
859 smp_wmb();
860 /*
861 * The above sequence increase must be visible before the
862 * below count decrease, which is ensured by the smp_wmb above
863 * in conjunction with the smp_rmb in mmu_invalidate_retry().
864 */
865 kvm->mmu_invalidate_in_progress--;
866 KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
867
868 /*
869 * Assert that at least one range was added between start() and end().
870 * Not adding a range isn't fatal, but it is a KVM bug.
871 */
872 WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
873}
874
875static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
876 const struct mmu_notifier_range *range)
877{
878 struct kvm *kvm = mmu_notifier_to_kvm(mn);
879 const struct kvm_mmu_notifier_range hva_range = {
880 .start = range->start,
881 .end = range->end,
882 .handler = (void *)kvm_null_fn,
883 .on_lock = kvm_mmu_invalidate_end,
884 .flush_on_ret = false,
885 .may_block = mmu_notifier_range_blockable(range),
886 };
887 bool wake;
888
889 __kvm_handle_hva_range(kvm, &hva_range);
890
891 /* Pairs with the increment in range_start(). */
892 spin_lock(&kvm->mn_invalidate_lock);
893 wake = (--kvm->mn_active_invalidate_count == 0);
894 spin_unlock(&kvm->mn_invalidate_lock);
895
896 /*
897 * There can only be one waiter, since the wait happens under
898 * slots_lock.
899 */
900 if (wake)
901 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
902}
903
904static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
905 struct mm_struct *mm,
906 unsigned long start,
907 unsigned long end)
908{
909 trace_kvm_age_hva(start, end);
910
911 return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
912 kvm_age_gfn);
913}
914
915static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
916 struct mm_struct *mm,
917 unsigned long start,
918 unsigned long end)
919{
920 trace_kvm_age_hva(start, end);
921
922 /*
923 * Even though we do not flush TLB, this will still adversely
924 * affect performance on pre-Haswell Intel EPT, where there is
925 * no EPT Access Bit to clear so that we have to tear down EPT
926 * tables instead. If we find this unacceptable, we can always
927 * add a parameter to kvm_age_hva so that it effectively doesn't
928 * do anything on clear_young.
929 *
930 * Also note that currently we never issue secondary TLB flushes
931 * from clear_young, leaving this job up to the regular system
932 * cadence. If we find this inaccurate, we might come up with a
933 * more sophisticated heuristic later.
934 */
935 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
936}
937
938static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
939 struct mm_struct *mm,
940 unsigned long address)
941{
942 trace_kvm_test_age_hva(address);
943
944 return kvm_handle_hva_range_no_flush(mn, address, address + 1,
945 kvm_test_age_gfn);
946}
947
948static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
949 struct mm_struct *mm)
950{
951 struct kvm *kvm = mmu_notifier_to_kvm(mn);
952 int idx;
953
954 idx = srcu_read_lock(&kvm->srcu);
955 kvm_flush_shadow_all(kvm);
956 srcu_read_unlock(&kvm->srcu, idx);
957}
958
959static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
960 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
961 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
962 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
963 .clear_young = kvm_mmu_notifier_clear_young,
964 .test_young = kvm_mmu_notifier_test_young,
965 .change_pte = kvm_mmu_notifier_change_pte,
966 .release = kvm_mmu_notifier_release,
967};
968
969static int kvm_init_mmu_notifier(struct kvm *kvm)
970{
971 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
972 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
973}
974
975#else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
976
977static int kvm_init_mmu_notifier(struct kvm *kvm)
978{
979 return 0;
980}
981
982#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
983
984#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
985static int kvm_pm_notifier_call(struct notifier_block *bl,
986 unsigned long state,
987 void *unused)
988{
989 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
990
991 return kvm_arch_pm_notifier(kvm, state);
992}
993
994static void kvm_init_pm_notifier(struct kvm *kvm)
995{
996 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
997 /* Suspend KVM before we suspend ftrace, RCU, etc. */
998 kvm->pm_notifier.priority = INT_MAX;
999 register_pm_notifier(&kvm->pm_notifier);
1000}
1001
1002static void kvm_destroy_pm_notifier(struct kvm *kvm)
1003{
1004 unregister_pm_notifier(&kvm->pm_notifier);
1005}
1006#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
1007static void kvm_init_pm_notifier(struct kvm *kvm)
1008{
1009}
1010
1011static void kvm_destroy_pm_notifier(struct kvm *kvm)
1012{
1013}
1014#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
1015
1016static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
1017{
1018 if (!memslot->dirty_bitmap)
1019 return;
1020
1021 kvfree(memslot->dirty_bitmap);
1022 memslot->dirty_bitmap = NULL;
1023}
1024
1025/* This does not remove the slot from struct kvm_memslots data structures */
1026static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
1027{
1028 if (slot->flags & KVM_MEM_GUEST_MEMFD)
1029 kvm_gmem_unbind(slot);
1030
1031 kvm_destroy_dirty_bitmap(slot);
1032
1033 kvm_arch_free_memslot(kvm, slot);
1034
1035 kfree(slot);
1036}
1037
1038static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
1039{
1040 struct hlist_node *idnode;
1041 struct kvm_memory_slot *memslot;
1042 int bkt;
1043
1044 /*
1045 * The same memslot objects live in both active and inactive sets,
1046 * arbitrarily free using index '1' so the second invocation of this
1047 * function isn't operating over a structure with dangling pointers
1048 * (even though this function isn't actually touching them).
1049 */
1050 if (!slots->node_idx)
1051 return;
1052
1053 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1054 kvm_free_memslot(kvm, memslot);
1055}
1056
1057static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
1058{
1059 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
1060 case KVM_STATS_TYPE_INSTANT:
1061 return 0444;
1062 case KVM_STATS_TYPE_CUMULATIVE:
1063 case KVM_STATS_TYPE_PEAK:
1064 default:
1065 return 0644;
1066 }
1067}
1068
1069
1070static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1071{
1072 int i;
1073 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1074 kvm_vcpu_stats_header.num_desc;
1075
1076 if (IS_ERR(kvm->debugfs_dentry))
1077 return;
1078
1079 debugfs_remove_recursive(kvm->debugfs_dentry);
1080
1081 if (kvm->debugfs_stat_data) {
1082 for (i = 0; i < kvm_debugfs_num_entries; i++)
1083 kfree(kvm->debugfs_stat_data[i]);
1084 kfree(kvm->debugfs_stat_data);
1085 }
1086}
1087
1088static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1089{
1090 static DEFINE_MUTEX(kvm_debugfs_lock);
1091 struct dentry *dent;
1092 char dir_name[ITOA_MAX_LEN * 2];
1093 struct kvm_stat_data *stat_data;
1094 const struct _kvm_stats_desc *pdesc;
1095 int i, ret = -ENOMEM;
1096 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1097 kvm_vcpu_stats_header.num_desc;
1098
1099 if (!debugfs_initialized())
1100 return 0;
1101
1102 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1103 mutex_lock(&kvm_debugfs_lock);
1104 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1105 if (dent) {
1106 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1107 dput(dent);
1108 mutex_unlock(&kvm_debugfs_lock);
1109 return 0;
1110 }
1111 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1112 mutex_unlock(&kvm_debugfs_lock);
1113 if (IS_ERR(dent))
1114 return 0;
1115
1116 kvm->debugfs_dentry = dent;
1117 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1118 sizeof(*kvm->debugfs_stat_data),
1119 GFP_KERNEL_ACCOUNT);
1120 if (!kvm->debugfs_stat_data)
1121 goto out_err;
1122
1123 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1124 pdesc = &kvm_vm_stats_desc[i];
1125 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1126 if (!stat_data)
1127 goto out_err;
1128
1129 stat_data->kvm = kvm;
1130 stat_data->desc = pdesc;
1131 stat_data->kind = KVM_STAT_VM;
1132 kvm->debugfs_stat_data[i] = stat_data;
1133 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1134 kvm->debugfs_dentry, stat_data,
1135 &stat_fops_per_vm);
1136 }
1137
1138 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1139 pdesc = &kvm_vcpu_stats_desc[i];
1140 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1141 if (!stat_data)
1142 goto out_err;
1143
1144 stat_data->kvm = kvm;
1145 stat_data->desc = pdesc;
1146 stat_data->kind = KVM_STAT_VCPU;
1147 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1148 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1149 kvm->debugfs_dentry, stat_data,
1150 &stat_fops_per_vm);
1151 }
1152
1153 ret = kvm_arch_create_vm_debugfs(kvm);
1154 if (ret)
1155 goto out_err;
1156
1157 return 0;
1158out_err:
1159 kvm_destroy_vm_debugfs(kvm);
1160 return ret;
1161}
1162
1163/*
1164 * Called after the VM is otherwise initialized, but just before adding it to
1165 * the vm_list.
1166 */
1167int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1168{
1169 return 0;
1170}
1171
1172/*
1173 * Called just after removing the VM from the vm_list, but before doing any
1174 * other destruction.
1175 */
1176void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1177{
1178}
1179
1180/*
1181 * Called after the per-VM debugfs is created. kvm->debugfs_dentry is already
1182 * set up when this is called, so arch-specific debugfs entries can be created
1183 * under it. Cleanup is done automatically and recursively by
1184 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
1185 */
1186int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1187{
1188 return 0;
1189}
1190
1191static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1192{
1193 struct kvm *kvm = kvm_arch_alloc_vm();
1194 struct kvm_memslots *slots;
1195 int r = -ENOMEM;
1196 int i, j;
1197
1198 if (!kvm)
1199 return ERR_PTR(-ENOMEM);
1200
1201 KVM_MMU_LOCK_INIT(kvm);
1202 mmgrab(current->mm);
1203 kvm->mm = current->mm;
1204 kvm_eventfd_init(kvm);
1205 mutex_init(&kvm->lock);
1206 mutex_init(&kvm->irq_lock);
1207 mutex_init(&kvm->slots_lock);
1208 mutex_init(&kvm->slots_arch_lock);
1209 spin_lock_init(&kvm->mn_invalidate_lock);
1210 rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1211 xa_init(&kvm->vcpu_array);
1212#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1213 xa_init(&kvm->mem_attr_array);
1214#endif
1215
1216 INIT_LIST_HEAD(&kvm->gpc_list);
1217 spin_lock_init(&kvm->gpc_lock);
1218
1219 INIT_LIST_HEAD(&kvm->devices);
1220 kvm->max_vcpus = KVM_MAX_VCPUS;
1221
1222 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1223
1224 /*
1225 * Force subsequent debugfs file creations to fail if the VM directory
1226 * is not created (by kvm_create_vm_debugfs()).
1227 */
1228 kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1229
1230 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1231 task_pid_nr(current));
1232
1233 if (init_srcu_struct(&kvm->srcu))
1234 goto out_err_no_srcu;
1235 if (init_srcu_struct(&kvm->irq_srcu))
1236 goto out_err_no_irq_srcu;
1237
1238 refcount_set(&kvm->users_count, 1);
1239 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1240 for (j = 0; j < 2; j++) {
1241 slots = &kvm->__memslots[i][j];
1242
1243 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1244 slots->hva_tree = RB_ROOT_CACHED;
1245 slots->gfn_tree = RB_ROOT;
1246 hash_init(slots->id_hash);
1247 slots->node_idx = j;
1248
1249 /* Generations must be different for each address space. */
1250 slots->generation = i;
1251 }
1252
1253 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1254 }
1255
1256 for (i = 0; i < KVM_NR_BUSES; i++) {
1257 rcu_assign_pointer(kvm->buses[i],
1258 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1259 if (!kvm->buses[i])
1260 goto out_err_no_arch_destroy_vm;
1261 }
1262
1263 r = kvm_arch_init_vm(kvm, type);
1264 if (r)
1265 goto out_err_no_arch_destroy_vm;
1266
1267 r = hardware_enable_all();
1268 if (r)
1269 goto out_err_no_disable;
1270
1271#ifdef CONFIG_HAVE_KVM_IRQCHIP
1272 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1273#endif
1274
1275 r = kvm_init_mmu_notifier(kvm);
1276 if (r)
1277 goto out_err_no_mmu_notifier;
1278
1279 r = kvm_coalesced_mmio_init(kvm);
1280 if (r < 0)
1281 goto out_no_coalesced_mmio;
1282
1283 r = kvm_create_vm_debugfs(kvm, fdname);
1284 if (r)
1285 goto out_err_no_debugfs;
1286
1287 r = kvm_arch_post_init_vm(kvm);
1288 if (r)
1289 goto out_err;
1290
1291 mutex_lock(&kvm_lock);
1292 list_add(&kvm->vm_list, &vm_list);
1293 mutex_unlock(&kvm_lock);
1294
1295 preempt_notifier_inc();
1296 kvm_init_pm_notifier(kvm);
1297
1298 return kvm;
1299
1300out_err:
1301 kvm_destroy_vm_debugfs(kvm);
1302out_err_no_debugfs:
1303 kvm_coalesced_mmio_free(kvm);
1304out_no_coalesced_mmio:
1305#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1306 if (kvm->mmu_notifier.ops)
1307 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1308#endif
1309out_err_no_mmu_notifier:
1310 hardware_disable_all();
1311out_err_no_disable:
1312 kvm_arch_destroy_vm(kvm);
1313out_err_no_arch_destroy_vm:
1314 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1315 for (i = 0; i < KVM_NR_BUSES; i++)
1316 kfree(kvm_get_bus(kvm, i));
1317 cleanup_srcu_struct(&kvm->irq_srcu);
1318out_err_no_irq_srcu:
1319 cleanup_srcu_struct(&kvm->srcu);
1320out_err_no_srcu:
1321 kvm_arch_free_vm(kvm);
1322 mmdrop(current->mm);
1323 return ERR_PTR(r);
1324}
1325
1326static void kvm_destroy_devices(struct kvm *kvm)
1327{
1328 struct kvm_device *dev, *tmp;
1329
1330 /*
1331 * We do not need to take the kvm->lock here, because nobody else
1332 * has a reference to the struct kvm at this point and therefore
1333 * cannot access the devices list anyhow.
1334 */
1335 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1336 list_del(&dev->vm_node);
1337 dev->ops->destroy(dev);
1338 }
1339}
1340
1341static void kvm_destroy_vm(struct kvm *kvm)
1342{
1343 int i;
1344 struct mm_struct *mm = kvm->mm;
1345
1346 kvm_destroy_pm_notifier(kvm);
1347 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1348 kvm_destroy_vm_debugfs(kvm);
1349 kvm_arch_sync_events(kvm);
1350 mutex_lock(&kvm_lock);
1351 list_del(&kvm->vm_list);
1352 mutex_unlock(&kvm_lock);
1353 kvm_arch_pre_destroy_vm(kvm);
1354
1355 kvm_free_irq_routing(kvm);
1356 for (i = 0; i < KVM_NR_BUSES; i++) {
1357 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1358
1359 if (bus)
1360 kvm_io_bus_destroy(bus);
1361 kvm->buses[i] = NULL;
1362 }
1363 kvm_coalesced_mmio_free(kvm);
1364#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1365 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1366 /*
1367 * At this point, pending calls to invalidate_range_start()
1368 * have completed but no more MMU notifiers will run, so
1369 * mn_active_invalidate_count may remain unbalanced.
1370 * No threads can be waiting in kvm_swap_active_memslots() as the
1371 * last reference on KVM has been dropped, but freeing
1372 * memslots would deadlock without this manual intervention.
1373 *
1374 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1375 * notifier between a start() and end(), then there shouldn't be any
1376 * in-progress invalidations.
1377 */
1378 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1379 if (kvm->mn_active_invalidate_count)
1380 kvm->mn_active_invalidate_count = 0;
1381 else
1382 WARN_ON(kvm->mmu_invalidate_in_progress);
1383#else
1384 kvm_flush_shadow_all(kvm);
1385#endif
1386 kvm_arch_destroy_vm(kvm);
1387 kvm_destroy_devices(kvm);
1388 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1389 kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1390 kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1391 }
1392 cleanup_srcu_struct(&kvm->irq_srcu);
1393 cleanup_srcu_struct(&kvm->srcu);
1394#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1395 xa_destroy(&kvm->mem_attr_array);
1396#endif
1397 kvm_arch_free_vm(kvm);
1398 preempt_notifier_dec();
1399 hardware_disable_all();
1400 mmdrop(mm);
1401}
1402
1403void kvm_get_kvm(struct kvm *kvm)
1404{
1405 refcount_inc(&kvm->users_count);
1406}
1407EXPORT_SYMBOL_GPL(kvm_get_kvm);
1408
1409/*
1410 * Grab a reference only if the VM is not under destruction; a safe version of
1411 * kvm_get_kvm(). Returns true if kvm was referenced successfully, false otherwise.
1412 */
1413bool kvm_get_kvm_safe(struct kvm *kvm)
1414{
1415 return refcount_inc_not_zero(&kvm->users_count);
1416}
1417EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
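
/*
 * A minimal usage sketch (illustrative only, not part of this file): bail
 * out if the VM is already being destroyed, otherwise drop the reference
 * when done, which may itself destroy the VM.
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return;
 *	... use kvm ...
 *	kvm_put_kvm(kvm);
 */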
1418
1419void kvm_put_kvm(struct kvm *kvm)
1420{
1421 if (refcount_dec_and_test(&kvm->users_count))
1422 kvm_destroy_vm(kvm);
1423}
1424EXPORT_SYMBOL_GPL(kvm_put_kvm);
1425
1426/*
1427 * Used to put a reference that was taken on behalf of an object associated
1428 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1429 * of the new file descriptor fails and the reference cannot be transferred to
1430 * its final owner. In such cases, the caller is still actively using @kvm and
1431 * will fail miserably if the refcount unexpectedly hits zero.
1432 */
1433void kvm_put_kvm_no_destroy(struct kvm *kvm)
1434{
1435 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1436}
1437EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1438
1439static int kvm_vm_release(struct inode *inode, struct file *filp)
1440{
1441 struct kvm *kvm = filp->private_data;
1442
1443 kvm_irqfd_release(kvm);
1444
1445 kvm_put_kvm(kvm);
1446 return 0;
1447}
1448
1449/*
1450 * Allocation size is twice the actual dirty bitmap size; the second half
1451 * serves as a scratch buffer. See kvm_vm_ioctl_get_dirty_log() for why.
1452 */
1453static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1454{
1455 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1456
1457 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1458 if (!memslot->dirty_bitmap)
1459 return -ENOMEM;
1460
1461 return 0;
1462}
1463
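/*
 * Each address space has two memslot sets (see kvm_create_vm()): one active
 * and one inactive, distinguished by node_idx. XOR'ing the active set's
 * node_idx with 1 therefore yields the inactive set.
 */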
1464static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1465{
1466 struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1467 int node_idx_inactive = active->node_idx ^ 1;
1468
1469 return &kvm->__memslots[as_id][node_idx_inactive];
1470}
1471
1472/*
1473 * Helper to get the address space ID when one of the memslot pointers may be
1474 * NULL. This also serves as a sanity check that at least one of the pointers
1475 * is non-NULL, and that their address space IDs don't diverge.
1476 */
1477static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1478 struct kvm_memory_slot *b)
1479{
1480 if (WARN_ON_ONCE(!a && !b))
1481 return 0;
1482
1483 if (!a)
1484 return b->as_id;
1485 if (!b)
1486 return a->as_id;
1487
1488 WARN_ON_ONCE(a->as_id != b->as_id);
1489 return a->as_id;
1490}
1491
1492static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1493 struct kvm_memory_slot *slot)
1494{
1495 struct rb_root *gfn_tree = &slots->gfn_tree;
1496 struct rb_node **node, *parent;
1497 int idx = slots->node_idx;
1498
1499 parent = NULL;
1500 for (node = &gfn_tree->rb_node; *node; ) {
1501 struct kvm_memory_slot *tmp;
1502
1503 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1504 parent = *node;
1505 if (slot->base_gfn < tmp->base_gfn)
1506 node = &(*node)->rb_left;
1507 else if (slot->base_gfn > tmp->base_gfn)
1508 node = &(*node)->rb_right;
1509 else
1510 BUG();
1511 }
1512
1513 rb_link_node(&slot->gfn_node[idx], parent, node);
1514 rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1515}
1516
1517static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1518 struct kvm_memory_slot *slot)
1519{
1520 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1521}
1522
1523static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1524 struct kvm_memory_slot *old,
1525 struct kvm_memory_slot *new)
1526{
1527 int idx = slots->node_idx;
1528
1529 WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1530
1531 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1532 &slots->gfn_tree);
1533}
1534
1535/*
1536 * Replace @old with @new in the inactive memslots.
1537 *
1538 * With NULL @old this simply adds @new.
1539 * With NULL @new this simply removes @old.
1540 *
1541 * If @new is non-NULL its hva_node[slots_idx] range has to be set
1542 * appropriately.
1543 */
1544static void kvm_replace_memslot(struct kvm *kvm,
1545 struct kvm_memory_slot *old,
1546 struct kvm_memory_slot *new)
1547{
1548 int as_id = kvm_memslots_get_as_id(old, new);
1549 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1550 int idx = slots->node_idx;
1551
1552 if (old) {
1553 hash_del(&old->id_node[idx]);
1554 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1555
1556 if ((long)old == atomic_long_read(&slots->last_used_slot))
1557 atomic_long_set(&slots->last_used_slot, (long)new);
1558
1559 if (!new) {
1560 kvm_erase_gfn_node(slots, old);
1561 return;
1562 }
1563 }
1564
1565	/*
1566	 * Initialize @new's hva range. Do this even when replacing an @old
1567	 * slot, as kvm_copy_memslot() deliberately does not touch node data.
1568	 */
1569 new->hva_node[idx].start = new->userspace_addr;
1570 new->hva_node[idx].last = new->userspace_addr +
1571 (new->npages << PAGE_SHIFT) - 1;
1572
1573	/*
1574	 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(),
1575	 * so hva_node needs to be swapped via remove+insert even though the
1576	 * hva can't change when replacing an existing slot.
1577	 */
1578 hash_add(slots->id_hash, &new->id_node[idx], new->id);
1579 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1580
1581 /*
1582 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1583 * switch the node in the gfn tree instead of removing the old and
1584 * inserting the new as two separate operations. Replacement is a
1585 * single O(1) operation versus two O(log(n)) operations for
1586 * remove+insert.
1587 */
1588 if (old && old->base_gfn == new->base_gfn) {
1589 kvm_replace_gfn_node(slots, old, new);
1590 } else {
1591 if (old)
1592 kvm_erase_gfn_node(slots, old);
1593 kvm_insert_gfn_node(slots, new);
1594 }
1595}
1596
1597/*
1598 * Flags that do not access any of the extra space of struct
1599 * kvm_userspace_memory_region2. KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1600 * only allows these.
1601 */
1602#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1603 (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1604
1605static int check_memory_region_flags(struct kvm *kvm,
1606 const struct kvm_userspace_memory_region2 *mem)
1607{
1608 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1609
1610 if (kvm_arch_has_private_mem(kvm))
1611 valid_flags |= KVM_MEM_GUEST_MEMFD;
1612
1613 /* Dirty logging private memory is not currently supported. */
1614 if (mem->flags & KVM_MEM_GUEST_MEMFD)
1615 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1616
1617#ifdef __KVM_HAVE_READONLY_MEM
1618 /*
1619 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1620 * read-only memslots have emulated MMIO, not page fault, semantics,
1621 * and KVM doesn't allow emulated MMIO for private memory.
1622 */
1623 if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
1624 valid_flags |= KVM_MEM_READONLY;
1625#endif
1626
1627 if (mem->flags & ~valid_flags)
1628 return -EINVAL;
1629
1630 return 0;
1631}
1632
1633static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1634{
1635 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1636
1637	/* Grab the generation from the active memslots. */
1638 u64 gen = __kvm_memslots(kvm, as_id)->generation;
1639
1640 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1641 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1642
1643 /*
1644 * Do not store the new memslots while there are invalidations in
1645 * progress, otherwise the locking in invalidate_range_start and
1646 * invalidate_range_end will be unbalanced.
1647 */
1648 spin_lock(&kvm->mn_invalidate_lock);
1649 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1650 while (kvm->mn_active_invalidate_count) {
1651 set_current_state(TASK_UNINTERRUPTIBLE);
1652 spin_unlock(&kvm->mn_invalidate_lock);
1653 schedule();
1654 spin_lock(&kvm->mn_invalidate_lock);
1655 }
1656 finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1657 rcu_assign_pointer(kvm->memslots[as_id], slots);
1658 spin_unlock(&kvm->mn_invalidate_lock);
1659
1660	/*
1661	 * Acquired in kvm_set_memslot(). Must be released before the SRCU
1662	 * synchronization below in order to avoid deadlocking with another
1663	 * thread acquiring slots_arch_lock in an SRCU critical section.
1664	 */
1665 mutex_unlock(&kvm->slots_arch_lock);
1666
1667 synchronize_srcu_expedited(&kvm->srcu);
1668
1669 /*
1670 * Increment the new memslot generation a second time, dropping the
1671 * update in-progress flag and incrementing the generation based on
1672 * the number of address spaces. This provides a unique and easily
1673 * identifiable generation number while the memslots are in flux.
1674 */
1675 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1676
1677 /*
1678 * Generations must be unique even across address spaces. We do not need
1679 * a global counter for that, instead the generation space is evenly split
1680 * across address spaces. For example, with two address spaces, address
1681 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1682 * use generations 1, 3, 5, ...
1683 */
1684 gen += kvm_arch_nr_memslot_as_ids(kvm);
1685
1686 kvm_arch_memslots_updated(kvm, gen);
1687
1688 slots->generation = gen;
1689}
1690
1691static int kvm_prepare_memory_region(struct kvm *kvm,
1692 const struct kvm_memory_slot *old,
1693 struct kvm_memory_slot *new,
1694 enum kvm_mr_change change)
1695{
1696 int r;
1697
1698 /*
1699 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1700 * will be freed on "commit". If logging is enabled in both old and
1701 * new, reuse the existing bitmap. If logging is enabled only in the
1702 * new and KVM isn't using a ring buffer, allocate and initialize a
1703 * new bitmap.
1704 */
1705 if (change != KVM_MR_DELETE) {
1706 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1707 new->dirty_bitmap = NULL;
1708 else if (old && old->dirty_bitmap)
1709 new->dirty_bitmap = old->dirty_bitmap;
1710 else if (kvm_use_dirty_bitmap(kvm)) {
1711 r = kvm_alloc_dirty_bitmap(new);
1712 if (r)
1713 return r;
1714
1715 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1716 bitmap_set(new->dirty_bitmap, 0, new->npages);
1717 }
1718 }
1719
1720 r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1721
1722 /* Free the bitmap on failure if it was allocated above. */
1723 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1724 kvm_destroy_dirty_bitmap(new);
1725
1726 return r;
1727}
1728
1729static void kvm_commit_memory_region(struct kvm *kvm,
1730 struct kvm_memory_slot *old,
1731 const struct kvm_memory_slot *new,
1732 enum kvm_mr_change change)
1733{
1734 int old_flags = old ? old->flags : 0;
1735 int new_flags = new ? new->flags : 0;
1736 /*
1737 * Update the total number of memslot pages before calling the arch
1738 * hook so that architectures can consume the result directly.
1739 */
1740 if (change == KVM_MR_DELETE)
1741 kvm->nr_memslot_pages -= old->npages;
1742 else if (change == KVM_MR_CREATE)
1743 kvm->nr_memslot_pages += new->npages;
1744
1745 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1746 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1747 atomic_set(&kvm->nr_memslots_dirty_logging,
1748 atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1749 }
1750
1751 kvm_arch_commit_memory_region(kvm, old, new, change);
1752
1753 switch (change) {
1754 case KVM_MR_CREATE:
1755 /* Nothing more to do. */
1756 break;
1757 case KVM_MR_DELETE:
1758 /* Free the old memslot and all its metadata. */
1759 kvm_free_memslot(kvm, old);
1760 break;
1761 case KVM_MR_MOVE:
1762 case KVM_MR_FLAGS_ONLY:
1763		/*
1764		 * Free the dirty bitmap as needed; the check below encompasses
1765		 * both the flags and whether a ring buffer is being used.
1766		 */
1767 if (old->dirty_bitmap && !new->dirty_bitmap)
1768 kvm_destroy_dirty_bitmap(old);
1769
1770		/*
1771		 * The final quirk. Free the detached old slot, but only its
1772		 * memory, not any metadata. Metadata, including arch specific
1773		 * data, may be reused by @new.
1774		 */
1775 kfree(old);
1776 break;
1777 default:
1778 BUG();
1779 }
1780}
1781
1782/*
1783 * Activate @new, which must be installed in the inactive slots by the caller,
1784 * by swapping the active slots and then propagating @new to @old once @old is
1785 * unreachable and can be safely modified.
1786 *
1787 * With NULL @old this simply adds @new to @active (while swapping the sets).
1788 * With NULL @new this simply removes @old from @active and frees it
1789 * (while also swapping the sets).
1790 */
1791static void kvm_activate_memslot(struct kvm *kvm,
1792 struct kvm_memory_slot *old,
1793 struct kvm_memory_slot *new)
1794{
1795 int as_id = kvm_memslots_get_as_id(old, new);
1796
1797 kvm_swap_active_memslots(kvm, as_id);
1798
1799 /* Propagate the new memslot to the now inactive memslots. */
1800 kvm_replace_memslot(kvm, old, new);
1801}
1802
1803static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1804 const struct kvm_memory_slot *src)
1805{
1806 dest->base_gfn = src->base_gfn;
1807 dest->npages = src->npages;
1808 dest->dirty_bitmap = src->dirty_bitmap;
1809 dest->arch = src->arch;
1810 dest->userspace_addr = src->userspace_addr;
1811 dest->flags = src->flags;
1812 dest->id = src->id;
1813 dest->as_id = src->as_id;
1814}
1815
1816static void kvm_invalidate_memslot(struct kvm *kvm,
1817 struct kvm_memory_slot *old,
1818 struct kvm_memory_slot *invalid_slot)
1819{
1820 /*
1821 * Mark the current slot INVALID. As with all memslot modifications,
1822 * this must be done on an unreachable slot to avoid modifying the
1823 * current slot in the active tree.
1824 */
1825 kvm_copy_memslot(invalid_slot, old);
1826 invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1827 kvm_replace_memslot(kvm, old, invalid_slot);
1828
1829 /*
1830 * Activate the slot that is now marked INVALID, but don't propagate
1831 * the slot to the now inactive slots. The slot is either going to be
1832 * deleted or recreated as a new slot.
1833 */
1834 kvm_swap_active_memslots(kvm, old->as_id);
1835
1836 /*
1837 * From this point no new shadow pages pointing to a deleted, or moved,
1838 * memslot will be created. Validation of sp->gfn happens in:
1839 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1840 * - kvm_is_visible_gfn (mmu_check_root)
1841 */
1842 kvm_arch_flush_shadow_memslot(kvm, old);
1843 kvm_arch_guest_memory_reclaimed(kvm);
1844
1845 /* Was released by kvm_swap_active_memslots(), reacquire. */
1846 mutex_lock(&kvm->slots_arch_lock);
1847
1848 /*
1849 * Copy the arch-specific field of the newly-installed slot back to the
1850 * old slot as the arch data could have changed between releasing
1851 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1852 * above. Writers are required to retrieve memslots *after* acquiring
1853 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1854 */
1855 old->arch = invalid_slot->arch;
1856}
1857
1858static void kvm_create_memslot(struct kvm *kvm,
1859 struct kvm_memory_slot *new)
1860{
1861 /* Add the new memslot to the inactive set and activate. */
1862 kvm_replace_memslot(kvm, NULL, new);
1863 kvm_activate_memslot(kvm, NULL, new);
1864}
1865
1866static void kvm_delete_memslot(struct kvm *kvm,
1867 struct kvm_memory_slot *old,
1868 struct kvm_memory_slot *invalid_slot)
1869{
1870	/*
1871	 * Remove the old memslot from the inactive set by passing NULL as the
1872	 * "new" slot, then do the same for the INVALID version in the active set.
1873	 */
1874 kvm_replace_memslot(kvm, old, NULL);
1875 kvm_activate_memslot(kvm, invalid_slot, NULL);
1876}
1877
1878static void kvm_move_memslot(struct kvm *kvm,
1879 struct kvm_memory_slot *old,
1880 struct kvm_memory_slot *new,
1881 struct kvm_memory_slot *invalid_slot)
1882{
1883 /*
1884 * Replace the old memslot in the inactive slots, and then swap slots
1885 * and replace the current INVALID with the new as well.
1886 */
1887 kvm_replace_memslot(kvm, old, new);
1888 kvm_activate_memslot(kvm, invalid_slot, new);
1889}
1890
1891static void kvm_update_flags_memslot(struct kvm *kvm,
1892 struct kvm_memory_slot *old,
1893 struct kvm_memory_slot *new)
1894{
1895 /*
1896 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1897 * an intermediate step. Instead, the old memslot is simply replaced
1898 * with a new, updated copy in both memslot sets.
1899 */
1900 kvm_replace_memslot(kvm, old, new);
1901 kvm_activate_memslot(kvm, old, new);
1902}
1903
1904static int kvm_set_memslot(struct kvm *kvm,
1905 struct kvm_memory_slot *old,
1906 struct kvm_memory_slot *new,
1907 enum kvm_mr_change change)
1908{
1909 struct kvm_memory_slot *invalid_slot;
1910 int r;
1911
1912 /*
1913 * Released in kvm_swap_active_memslots().
1914 *
1915	 * Must be held from before the current memslots are copied until after
1916	 * the new memslots are installed with rcu_assign_pointer(), then
1917	 * released before the SRCU synchronization in kvm_swap_active_memslots().
1918 *
1919 * When modifying memslots outside of the slots_lock, must be held
1920 * before reading the pointer to the current memslots until after all
1921 * changes to those memslots are complete.
1922 *
1923 * These rules ensure that installing new memslots does not lose
1924 * changes made to the previous memslots.
1925 */
1926 mutex_lock(&kvm->slots_arch_lock);
1927
1928 /*
1929 * Invalidate the old slot if it's being deleted or moved. This is
1930 * done prior to actually deleting/moving the memslot to allow vCPUs to
1931 * continue running by ensuring there are no mappings or shadow pages
1932 * for the memslot when it is deleted/moved. Without pre-invalidation
1933 * (and without a lock), a window would exist between effecting the
1934 * delete/move and committing the changes in arch code where KVM or a
1935 * guest could access a non-existent memslot.
1936 *
1937 * Modifications are done on a temporary, unreachable slot. The old
1938 * slot needs to be preserved in case a later step fails and the
1939 * invalidation needs to be reverted.
1940 */
1941 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1942 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1943 if (!invalid_slot) {
1944 mutex_unlock(&kvm->slots_arch_lock);
1945 return -ENOMEM;
1946 }
1947 kvm_invalidate_memslot(kvm, old, invalid_slot);
1948 }
1949
1950 r = kvm_prepare_memory_region(kvm, old, new, change);
1951 if (r) {
1952		/*
1953		 * For DELETE/MOVE, revert the above INVALID change. No
1954		 * modifications are required since the original slot was
1955		 * preserved in the inactive slots. Changing the active memslots
1956		 * also releases slots_arch_lock.
1957		 */
1958 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1959 kvm_activate_memslot(kvm, invalid_slot, old);
1960 kfree(invalid_slot);
1961 } else {
1962 mutex_unlock(&kvm->slots_arch_lock);
1963 }
1964 return r;
1965 }
1966
1967	/*
1968	 * For DELETE and MOVE, the temporary slot is now active as the INVALID
1969	 * version of the old slot. MOVE is particularly special as it reuses
1970	 * the old slot and keeps a copy of the old slot (in invalid_slot).
1971	 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
1972	 * old slot is detached but otherwise preserved.
1973	 */
1974 if (change == KVM_MR_CREATE)
1975 kvm_create_memslot(kvm, new);
1976 else if (change == KVM_MR_DELETE)
1977 kvm_delete_memslot(kvm, old, invalid_slot);
1978 else if (change == KVM_MR_MOVE)
1979 kvm_move_memslot(kvm, old, new, invalid_slot);
1980 else if (change == KVM_MR_FLAGS_ONLY)
1981 kvm_update_flags_memslot(kvm, old, new);
1982 else
1983 BUG();
1984
1985 /* Free the temporary INVALID slot used for DELETE and MOVE. */
1986 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1987 kfree(invalid_slot);
1988
1989 /*
1990 * No need to refresh new->arch, changes after dropping slots_arch_lock
1991 * will directly hit the final, active memslot. Architectures are
1992 * responsible for knowing that new->arch may be stale.
1993 */
1994 kvm_commit_memory_region(kvm, old, new, change);
1995
1996 return 0;
1997}
1998
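/*
 * Returns true if any part of [@start, @end) is covered by a memslot other
 * than @id, i.e. if creating or moving slot @id into this gfn range would
 * overlap an existing slot.
 */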
1999static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
2000 gfn_t start, gfn_t end)
2001{
2002 struct kvm_memslot_iter iter;
2003
2004 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
2005 if (iter.slot->id != id)
2006 return true;
2007 }
2008
2009 return false;
2010}
2011
2012/*
2013 * Allocate some memory and give it an address in the guest physical address
2014 * space.
2015 *
2016 * Discontiguous memory is allowed, mostly for framebuffers.
2017 *
2018 * Must be called holding kvm->slots_lock for write.
2019 */
2020int __kvm_set_memory_region(struct kvm *kvm,
2021 const struct kvm_userspace_memory_region2 *mem)
2022{
2023 struct kvm_memory_slot *old, *new;
2024 struct kvm_memslots *slots;
2025 enum kvm_mr_change change;
2026 unsigned long npages;
2027 gfn_t base_gfn;
2028 int as_id, id;
2029 int r;
2030
2031 r = check_memory_region_flags(kvm, mem);
2032 if (r)
2033 return r;
2034
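	/*
	 * mem->slot encodes the address space ID in bits 31:16 and the slot
	 * ID in bits 15:0, e.g. slot 0x00010005 is slot 5 in address space 1.
	 */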
2035 as_id = mem->slot >> 16;
2036 id = (u16)mem->slot;
2037
2038 /* General sanity checks */
2039 if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2040 (mem->memory_size != (unsigned long)mem->memory_size))
2041 return -EINVAL;
2042 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2043 return -EINVAL;
2044 /* We can read the guest memory with __xxx_user() later on. */
2045 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2046 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2047 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2048 mem->memory_size))
2049 return -EINVAL;
2050 if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2051 (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2052 mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2053 return -EINVAL;
2054 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2055 return -EINVAL;
2056 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2057 return -EINVAL;
2058 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2059 return -EINVAL;
2060
2061 slots = __kvm_memslots(kvm, as_id);
2062
2063 /*
2064 * Note, the old memslot (and the pointer itself!) may be invalidated
2065 * and/or destroyed by kvm_set_memslot().
2066 */
2067 old = id_to_memslot(slots, id);
2068
2069 if (!mem->memory_size) {
2070 if (!old || !old->npages)
2071 return -EINVAL;
2072
2073 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2074 return -EIO;
2075
2076 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2077 }
2078
2079 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2080 npages = (mem->memory_size >> PAGE_SHIFT);
2081
2082 if (!old || !old->npages) {
2083 change = KVM_MR_CREATE;
2084
2085 /*
2086 * To simplify KVM internals, the total number of pages across
2087 * all memslots must fit in an unsigned long.
2088 */
2089 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2090 return -EINVAL;
2091 } else { /* Modify an existing slot. */
2092		/* Private memslots are immutable; they can only be deleted. */
2093 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2094 return -EINVAL;
2095 if ((mem->userspace_addr != old->userspace_addr) ||
2096 (npages != old->npages) ||
2097 ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2098 return -EINVAL;
2099
2100 if (base_gfn != old->base_gfn)
2101 change = KVM_MR_MOVE;
2102 else if (mem->flags != old->flags)
2103 change = KVM_MR_FLAGS_ONLY;
2104 else /* Nothing to change. */
2105 return 0;
2106 }
2107
2108 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2109 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2110 return -EEXIST;
2111
2112	/* Allocate a slot that will persist in the memslots. */
2113 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2114 if (!new)
2115 return -ENOMEM;
2116
2117 new->as_id = as_id;
2118 new->id = id;
2119 new->base_gfn = base_gfn;
2120 new->npages = npages;
2121 new->flags = mem->flags;
2122 new->userspace_addr = mem->userspace_addr;
2123 if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2124 r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2125 if (r)
2126 goto out;
2127 }
2128
2129 r = kvm_set_memslot(kvm, old, new, change);
2130 if (r)
2131 goto out_unbind;
2132
2133 return 0;
2134
2135out_unbind:
2136 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2137 kvm_gmem_unbind(new);
2138out:
2139 kfree(new);
2140 return r;
2141}
2142EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2143
2144int kvm_set_memory_region(struct kvm *kvm,
2145 const struct kvm_userspace_memory_region2 *mem)
2146{
2147 int r;
2148
2149 mutex_lock(&kvm->slots_lock);
2150 r = __kvm_set_memory_region(kvm, mem);
2151 mutex_unlock(&kvm->slots_lock);
2152 return r;
2153}
2154EXPORT_SYMBOL_GPL(kvm_set_memory_region);
2155
2156static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2157 struct kvm_userspace_memory_region2 *mem)
2158{
2159 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2160 return -EINVAL;
2161
2162 return kvm_set_memory_region(kvm, mem);
2163}
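
/*
 * Illustrative userspace counterpart (a sketch with assumed values, not part
 * of this file): register 2 MiB of page-aligned anonymous memory as slot 0
 * at guest physical address 0x100000.
 *
 *	struct kvm_userspace_memory_region2 region = {
 *		.slot = 0,
 *		.flags = 0,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size = 2 * 1024 * 1024,
 *		.userspace_addr = (__u64)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
 */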
2164
2165#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2166/**
2167 * kvm_get_dirty_log - get a snapshot of dirty pages
2168 * @kvm: pointer to kvm instance
2169 * @log: slot id and address to which we copy the log
2170 * @is_dirty: set to '1' if any dirty pages were found
2171 * @memslot: set to the associated memslot, always valid on success
2172 */
2173int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2174 int *is_dirty, struct kvm_memory_slot **memslot)
2175{
2176 struct kvm_memslots *slots;
2177 int i, as_id, id;
2178 unsigned long n;
2179 unsigned long any = 0;
2180
2181 /* Dirty ring tracking may be exclusive to dirty log tracking */
2182 if (!kvm_use_dirty_bitmap(kvm))
2183 return -ENXIO;
2184
2185 *memslot = NULL;
2186 *is_dirty = 0;
2187
2188 as_id = log->slot >> 16;
2189 id = (u16)log->slot;
2190 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2191 return -EINVAL;
2192
2193 slots = __kvm_memslots(kvm, as_id);
2194 *memslot = id_to_memslot(slots, id);
2195 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2196 return -ENOENT;
2197
2198 kvm_arch_sync_dirty_log(kvm, *memslot);
2199
2200 n = kvm_dirty_bitmap_bytes(*memslot);
2201
2202 for (i = 0; !any && i < n/sizeof(long); ++i)
2203 any = (*memslot)->dirty_bitmap[i];
2204
2205 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2206 return -EFAULT;
2207
2208 if (any)
2209 *is_dirty = 1;
2210 return 0;
2211}
2212EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2213
2214#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2215/**
2216 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2217 * and reenable dirty page tracking for the corresponding pages.
2218 * @kvm: pointer to kvm instance
2219 * @log: slot id and address to which we copy the log
2220 *
2221 * Keep in mind that VCPU threads can write to the bitmap concurrently.
2222 * To avoid losing track of dirty pages, we therefore keep the following
2223 * order:
2224 *
2225 * 1. Take a snapshot of the bit and clear it if needed.
2226 * 2. Write protect the corresponding page.
2227 * 3. Copy the snapshot to the userspace.
2228 * 4. Upon return caller flushes TLB's if needed.
2229 *
2230 * Between 2 and 4, the guest may write to the page using the remaining TLB
2231 * entry. This is not a problem because the page is reported dirty using
2232 * the snapshot taken before and step 4 ensures that writes done after
2233 * exiting to userspace will be logged for the next call.
2234 *
2235 */
2236static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2237{
2238 struct kvm_memslots *slots;
2239 struct kvm_memory_slot *memslot;
2240 int i, as_id, id;
2241 unsigned long n;
2242 unsigned long *dirty_bitmap;
2243 unsigned long *dirty_bitmap_buffer;
2244 bool flush;
2245
2246 /* Dirty ring tracking may be exclusive to dirty log tracking */
2247 if (!kvm_use_dirty_bitmap(kvm))
2248 return -ENXIO;
2249
2250 as_id = log->slot >> 16;
2251 id = (u16)log->slot;
2252 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2253 return -EINVAL;
2254
2255 slots = __kvm_memslots(kvm, as_id);
2256 memslot = id_to_memslot(slots, id);
2257 if (!memslot || !memslot->dirty_bitmap)
2258 return -ENOENT;
2259
2260 dirty_bitmap = memslot->dirty_bitmap;
2261
2262 kvm_arch_sync_dirty_log(kvm, memslot);
2263
2264 n = kvm_dirty_bitmap_bytes(memslot);
2265 flush = false;
2266 if (kvm->manual_dirty_log_protect) {
2267 /*
2268 * Unlike kvm_get_dirty_log, we always return false in *flush,
2269 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
2270 * is some code duplication between this function and
2271		 * kvm_get_dirty_log, but hopefully all architectures will
2272		 * transition to kvm_get_dirty_log_protect so that
2273		 * kvm_get_dirty_log can be eliminated.
2274 */
2275 dirty_bitmap_buffer = dirty_bitmap;
2276 } else {
2277 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2278 memset(dirty_bitmap_buffer, 0, n);
2279
2280 KVM_MMU_LOCK(kvm);
2281 for (i = 0; i < n / sizeof(long); i++) {
2282 unsigned long mask;
2283 gfn_t offset;
2284
2285 if (!dirty_bitmap[i])
2286 continue;
2287
2288 flush = true;
2289 mask = xchg(&dirty_bitmap[i], 0);
2290 dirty_bitmap_buffer[i] = mask;
2291
2292 offset = i * BITS_PER_LONG;
2293 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2294 offset, mask);
2295 }
2296 KVM_MMU_UNLOCK(kvm);
2297 }
2298
2299 if (flush)
2300 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2301
2302 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2303 return -EFAULT;
2304 return 0;
2305}
2306
2308/**
2309 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2310 * @kvm: kvm instance
2311 * @log: slot id and address to which we copy the log
2312 *
2313 * Steps 1-4 below provide a general overview of dirty page logging. See
2314 * kvm_get_dirty_log_protect() function description for additional details.
2315 *
2316 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2317 * always flush the TLB (step 4) even if a previous step failed and the dirty
2318 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2319 * API does not preclude a subsequent dirty log read by user space. Flushing
2320 * the TLB ensures writes will be marked dirty for the next log read.
2321 *
2322 * 1. Take a snapshot of the bit and clear it if needed.
2323 * 2. Write protect the corresponding page.
2324 * 3. Copy the snapshot to the userspace.
2325 * 4. Flush TLB's if needed.
2326 */
2327static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2328 struct kvm_dirty_log *log)
2329{
2330 int r;
2331
2332 mutex_lock(&kvm->slots_lock);
2333
2334 r = kvm_get_dirty_log_protect(kvm, log);
2335
2336 mutex_unlock(&kvm->slots_lock);
2337 return r;
2338}
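
/*
 * Illustrative userspace flow for the above (sketch; the user buffer must
 * hold one bit per page in the slot):
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot,		(as_id << 16 | slot id)
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */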
2339
2340/**
2341 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2342 * and reenable dirty page tracking for the corresponding pages.
2343 * @kvm: pointer to kvm instance
2344 * @log: slot id and address from which to fetch the bitmap of dirty pages
2345 */
2346static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2347 struct kvm_clear_dirty_log *log)
2348{
2349 struct kvm_memslots *slots;
2350 struct kvm_memory_slot *memslot;
2351 int as_id, id;
2352 gfn_t offset;
2353 unsigned long i, n;
2354 unsigned long *dirty_bitmap;
2355 unsigned long *dirty_bitmap_buffer;
2356 bool flush;
2357
2358 /* Dirty ring tracking may be exclusive to dirty log tracking */
2359 if (!kvm_use_dirty_bitmap(kvm))
2360 return -ENXIO;
2361
2362 as_id = log->slot >> 16;
2363 id = (u16)log->slot;
2364 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2365 return -EINVAL;
2366
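	/* The ABI requires @first_page to be a multiple of 64. */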
2367 if (log->first_page & 63)
2368 return -EINVAL;
2369
2370 slots = __kvm_memslots(kvm, as_id);
2371 memslot = id_to_memslot(slots, id);
2372 if (!memslot || !memslot->dirty_bitmap)
2373 return -ENOENT;
2374
2375 dirty_bitmap = memslot->dirty_bitmap;
2376
2377 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2378
2379 if (log->first_page > memslot->npages ||
2380 log->num_pages > memslot->npages - log->first_page ||
2381 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2382 return -EINVAL;
2383
2384 kvm_arch_sync_dirty_log(kvm, memslot);
2385
2386 flush = false;
2387 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2388 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2389 return -EFAULT;
2390
2391 KVM_MMU_LOCK(kvm);
2392 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2393 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2394 i++, offset += BITS_PER_LONG) {
2395 unsigned long mask = *dirty_bitmap_buffer++;
2396 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2397 if (!mask)
2398 continue;
2399
2400 mask &= atomic_long_fetch_andnot(mask, p);
2401
2402 /*
2403 * mask contains the bits that really have been cleared. This
2404 * never includes any bits beyond the length of the memslot (if
2405 * the length is not aligned to 64 pages), therefore it is not
2406 * a problem if userspace sets them in log->dirty_bitmap.
2407 */
2408 if (mask) {
2409 flush = true;
2410 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2411 offset, mask);
2412 }
2413 }
2414 KVM_MMU_UNLOCK(kvm);
2415
2416 if (flush)
2417 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2418
2419 return 0;
2420}
2421
2422static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2423 struct kvm_clear_dirty_log *log)
2424{
2425 int r;
2426
2427 mutex_lock(&kvm->slots_lock);
2428
2429 r = kvm_clear_dirty_log_protect(kvm, log);
2430
2431 mutex_unlock(&kvm->slots_lock);
2432 return r;
2433}
2434#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2435
2436#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2437/*
2438 * Returns true if _all_ gfns in the range [@start, @end) have attributes
2439 * matching @attrs.
2440 */
2441bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2442 unsigned long attrs)
2443{
2444 XA_STATE(xas, &kvm->mem_attr_array, start);
2445 unsigned long index;
2446 bool has_attrs;
2447 void *entry;
2448
2449 rcu_read_lock();
2450
2451 if (!attrs) {
2452 has_attrs = !xas_find(&xas, end - 1);
2453 goto out;
2454 }
2455
2456 has_attrs = true;
2457 for (index = start; index < end; index++) {
2458 do {
2459 entry = xas_next(&xas);
2460 } while (xas_retry(&xas, entry));
2461
2462 if (xas.xa_index != index || xa_to_value(entry) != attrs) {
2463 has_attrs = false;
2464 break;
2465 }
2466 }
2467
2468out:
2469 rcu_read_unlock();
2470 return has_attrs;
2471}
2472
2473static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2474{
2475 if (!kvm || kvm_arch_has_private_mem(kvm))
2476 return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2477
2478 return 0;
2479}
2480
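/*
 * Invoke @range->handler on every memslot that intersects
 * [@range->start, @range->end) in every address space, taking mmu_lock
 * lazily, i.e. only once a first intersecting memslot is found.
 */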
2481static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2482 struct kvm_mmu_notifier_range *range)
2483{
2484 struct kvm_gfn_range gfn_range;
2485 struct kvm_memory_slot *slot;
2486 struct kvm_memslots *slots;
2487 struct kvm_memslot_iter iter;
2488 bool found_memslot = false;
2489 bool ret = false;
2490 int i;
2491
2492 gfn_range.arg = range->arg;
2493 gfn_range.may_block = range->may_block;
2494
2495 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2496 slots = __kvm_memslots(kvm, i);
2497
2498 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2499 slot = iter.slot;
2500 gfn_range.slot = slot;
2501
2502 gfn_range.start = max(range->start, slot->base_gfn);
2503 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2504 if (gfn_range.start >= gfn_range.end)
2505 continue;
2506
2507 if (!found_memslot) {
2508 found_memslot = true;
2509 KVM_MMU_LOCK(kvm);
2510 if (!IS_KVM_NULL_FN(range->on_lock))
2511 range->on_lock(kvm);
2512 }
2513
2514 ret |= range->handler(kvm, &gfn_range);
2515 }
2516 }
2517
2518 if (range->flush_on_ret && ret)
2519 kvm_flush_remote_tlbs(kvm);
2520
2521 if (found_memslot)
2522 KVM_MMU_UNLOCK(kvm);
2523}
2524
2525static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2526 struct kvm_gfn_range *range)
2527{
2528 /*
2529 * Unconditionally add the range to the invalidation set, regardless of
2530 * whether or not the arch callback actually needs to zap SPTEs. E.g.
2531 * if KVM supports RWX attributes in the future and the attributes are
2532 * going from R=>RW, zapping isn't strictly necessary. Unconditionally
2533 * adding the range allows KVM to require that MMU invalidations add at
2534 * least one range between begin() and end(), e.g. allows KVM to detect
2535 * bugs where the add() is missed. Relaxing the rule *might* be safe,
2536 * but it's not obvious that allowing new mappings while the attributes
2537 * are in flux is desirable or worth the complexity.
2538 */
2539 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2540
2541 return kvm_arch_pre_set_memory_attributes(kvm, range);
2542}
2543
2544/* Set @attributes for the gfn range [@start, @end). */
2545static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2546 unsigned long attributes)
2547{
2548 struct kvm_mmu_notifier_range pre_set_range = {
2549 .start = start,
2550 .end = end,
2551 .handler = kvm_pre_set_memory_attributes,
2552 .on_lock = kvm_mmu_invalidate_begin,
2553 .flush_on_ret = true,
2554 .may_block = true,
2555 };
2556 struct kvm_mmu_notifier_range post_set_range = {
2557 .start = start,
2558 .end = end,
2559 .arg.attributes = attributes,
2560 .handler = kvm_arch_post_set_memory_attributes,
2561 .on_lock = kvm_mmu_invalidate_end,
2562 .may_block = true,
2563 };
2564 unsigned long i;
2565 void *entry;
2566 int r = 0;
2567
2568 entry = attributes ? xa_mk_value(attributes) : NULL;
2569
2570 mutex_lock(&kvm->slots_lock);
2571
2572	/* Nothing to do if the entire range has the desired attributes. */
2573 if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
2574 goto out_unlock;
2575
2576 /*
2577 * Reserve memory ahead of time to avoid having to deal with failures
2578 * partway through setting the new attributes.
2579 */
2580 for (i = start; i < end; i++) {
2581 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2582 if (r)
2583 goto out_unlock;
2584 }
2585
2586 kvm_handle_gfn_range(kvm, &pre_set_range);
2587
2588 for (i = start; i < end; i++) {
2589 r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2590 GFP_KERNEL_ACCOUNT));
2591 KVM_BUG_ON(r, kvm);
2592 }
2593
2594 kvm_handle_gfn_range(kvm, &post_set_range);
2595
2596out_unlock:
2597 mutex_unlock(&kvm->slots_lock);
2598
2599 return r;
2600}

2601static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2602 struct kvm_memory_attributes *attrs)
2603{
2604 gfn_t start, end;
2605
2606 /* flags is currently not used. */
2607 if (attrs->flags)
2608 return -EINVAL;
2609 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2610 return -EINVAL;
2611 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2612 return -EINVAL;
2613 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2614 return -EINVAL;
2615
2616 start = attrs->address >> PAGE_SHIFT;
2617 end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2618
2619	/*
2620	 * xarray tracks data using "unsigned long", and as a result so does
2621	 * KVM. For simplicity, generic attributes are supported only on
2622	 * 64-bit architectures.
2623	 */
2624 BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2625
2626 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2627}
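
/*
 * Illustrative userspace usage (a sketch with assumed values, not part of
 * this file): mark one page at guest physical address 0x100000 as private.
 *
 *	struct kvm_memory_attributes attrs = {
 *		.address = 0x100000,
 *		.size = 0x1000,
 *		.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
 *	};
 *	ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
 */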
2628#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2629
2630struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2631{
2632 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2633}
2634EXPORT_SYMBOL_GPL(gfn_to_memslot);
2635
2636struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2637{
2638 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2639 u64 gen = slots->generation;
2640 struct kvm_memory_slot *slot;
2641
2642 /*
2643 * This also protects against using a memslot from a different address space,
2644 * since different address spaces have different generation numbers.
2645 */
2646 if (unlikely(gen != vcpu->last_used_slot_gen)) {
2647 vcpu->last_used_slot = NULL;
2648 vcpu->last_used_slot_gen = gen;
2649 }
2650
2651 slot = try_get_memslot(vcpu->last_used_slot, gfn);
2652 if (slot)
2653 return slot;
2654
2655 /*
2656 * Fall back to searching all memslots. We purposely use
2657 * search_memslots() instead of __gfn_to_memslot() to avoid
2658 * thrashing the VM-wide last_used_slot in kvm_memslots.
2659 */
2660 slot = search_memslots(slots, gfn, false);
2661 if (slot) {
2662 vcpu->last_used_slot = slot;
2663 return slot;
2664 }
2665
2666 return NULL;
2667}
2668
2669bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2670{
2671 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2672
2673 return kvm_is_visible_memslot(memslot);
2674}
2675EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2676
2677bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2678{
2679 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2680
2681 return kvm_is_visible_memslot(memslot);
2682}
2683EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2684
2685unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2686{
2687 struct vm_area_struct *vma;
2688 unsigned long addr, size;
2689
2690 size = PAGE_SIZE;
2691
2692 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2693 if (kvm_is_error_hva(addr))
2694 return PAGE_SIZE;
2695
2696 mmap_read_lock(current->mm);
2697 vma = find_vma(current->mm, addr);
2698 if (!vma)
2699 goto out;
2700
2701 size = vma_kernel_pagesize(vma);
2702
2703out:
2704 mmap_read_unlock(current->mm);
2705
2706 return size;
2707}
2708
2709static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2710{
2711 return slot->flags & KVM_MEM_READONLY;
2712}
2713
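/*
 * The gfn -> hva translation itself is linear within a slot:
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE
 *
 * (see __gfn_to_hva_memslot()). The helpers below mostly wrap that
 * calculation with validity and writability checks.
 */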
2714static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2715 gfn_t *nr_pages, bool write)
2716{
2717 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2718 return KVM_HVA_ERR_BAD;
2719
2720 if (memslot_is_readonly(slot) && write)
2721 return KVM_HVA_ERR_RO_BAD;
2722
2723 if (nr_pages)
2724 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2725
2726 return __gfn_to_hva_memslot(slot, gfn);
2727}
2728
2729static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2730 gfn_t *nr_pages)
2731{
2732 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2733}
2734
2735unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2736 gfn_t gfn)
2737{
2738 return gfn_to_hva_many(slot, gfn, NULL);
2739}
2740EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2741
2742unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2743{
2744 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2745}
2746EXPORT_SYMBOL_GPL(gfn_to_hva);
2747
2748unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2749{
2750 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2751}
2752EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2753
2754/*
2755 * Return the hva of a @gfn and the R/W attribute if possible.
2756 *
2757 * @slot: the kvm_memory_slot which contains @gfn
2758 * @gfn: the gfn to be translated
2759 * @writable: used to return the read/write attribute of the @slot if the hva
2760 * is valid and @writable is not NULL
2761 */
2762unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2763 gfn_t gfn, bool *writable)
2764{
2765 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2766
2767 if (!kvm_is_error_hva(hva) && writable)
2768 *writable = !memslot_is_readonly(slot);
2769
2770 return hva;
2771}
2772
2773unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2774{
2775 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2776
2777 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2778}
2779
2780unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2781{
2782 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2783
2784 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2785}
2786
2787static inline int check_user_page_hwpoison(unsigned long addr)
2788{
2789 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2790
2791 rc = get_user_pages(addr, 1, flags, NULL);
2792 return rc == -EHWPOISON;
2793}
2794
2795/*
2796 * The fast path to get the writable pfn, which will be stored in @pfn.
2797 * Returns true on success, false otherwise. It's also the only path that
2798 * can run in atomic context.
2799 */
2800static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2801 bool *writable, kvm_pfn_t *pfn)
2802{
2803 struct page *page[1];
2804
2805	/*
2806	 * Fast-pin a writable pfn only if it is a write fault request
2807	 * or the caller allows mapping a writable pfn for a read fault
2808	 * request.
2809	 */
2810 if (!(write_fault || writable))
2811 return false;
2812
2813 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2814 *pfn = page_to_pfn(page[0]);
2815
2816 if (writable)
2817 *writable = true;
2818 return true;
2819 }
2820
2821 return false;
2822}
2823
2824/*
2825 * The slow path to get the pfn of the specified host virtual address.
2826 * Returns 1 on success, or -errno if an error is detected.
2827 */
2828static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2829 bool interruptible, bool *writable, kvm_pfn_t *pfn)
2830{
2831 /*
2832 * When a VCPU accesses a page that is not mapped into the secondary
2833 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2834 * make progress. We always want to honor NUMA hinting faults in that
2835 * case, because GUP usage corresponds to memory accesses from the VCPU.
2836 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2837 * mapped into the secondary MMU and gets accessed by a VCPU.
2838 *
2839 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2840 * implicitly honor NUMA hinting faults and don't need this flag.
2841 */
2842 unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2843 struct page *page;
2844 int npages;
2845
2846 might_sleep();
2847
2848 if (writable)
2849 *writable = write_fault;
2850
2851 if (write_fault)
2852 flags |= FOLL_WRITE;
2853 if (async)
2854 flags |= FOLL_NOWAIT;
2855 if (interruptible)
2856 flags |= FOLL_INTERRUPTIBLE;
2857
2858 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2859 if (npages != 1)
2860 return npages;
2861
2862 /* map read fault as writable if possible */
2863 if (unlikely(!write_fault) && writable) {
2864 struct page *wpage;
2865
2866 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2867 *writable = true;
2868 put_page(page);
2869 page = wpage;
2870 }
2871 }
2872 *pfn = page_to_pfn(page);
2873 return npages;
2874}
2875
2876static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2877{
2878 if (unlikely(!(vma->vm_flags & VM_READ)))
2879 return false;
2880
2881 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2882 return false;
2883
2884 return true;
2885}
2886
2887static int kvm_try_get_pfn(kvm_pfn_t pfn)
2888{
2889 struct page *page = kvm_pfn_to_refcounted_page(pfn);
2890
2891 if (!page)
2892 return 1;
2893
2894 return get_page_unless_zero(page);
2895}
2896
2897static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2898 unsigned long addr, bool write_fault,
2899 bool *writable, kvm_pfn_t *p_pfn)
2900{
2901 kvm_pfn_t pfn;
2902 pte_t *ptep;
2903 pte_t pte;
2904 spinlock_t *ptl;
2905 int r;
2906
2907 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2908 if (r) {
2909 /*
2910 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2911 * not call the fault handler, so do it here.
2912 */
2913 bool unlocked = false;
2914 r = fixup_user_fault(current->mm, addr,
2915 (write_fault ? FAULT_FLAG_WRITE : 0),
2916 &unlocked);
2917 if (unlocked)
2918 return -EAGAIN;
2919 if (r)
2920 return r;
2921
2922 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2923 if (r)
2924 return r;
2925 }
2926
2927 pte = ptep_get(ptep);
2928
2929 if (write_fault && !pte_write(pte)) {
2930 pfn = KVM_PFN_ERR_RO_FAULT;
2931 goto out;
2932 }
2933
2934 if (writable)
2935 *writable = pte_write(pte);
2936 pfn = pte_pfn(pte);
2937
2938 /*
2939 * Get a reference here because callers of *hva_to_pfn* and
2940 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2941 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
2942 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2943 * simply do nothing for reserved pfns.
2944 *
2945 * Whoever called remap_pfn_range is also going to call e.g.
2946 * unmap_mapping_range before the underlying pages are freed,
2947 * causing a call to our MMU notifier.
2948 *
2949 * Certain IO or PFNMAP mappings can be backed with valid
2950 * struct pages, but be allocated without refcounting e.g.,
2951 * tail pages of non-compound higher order allocations, which
2952 * would then underflow the refcount when the caller does the
2953 * required put_page. Don't allow those pages here.
2954 */
2955 if (!kvm_try_get_pfn(pfn))
2956 r = -EFAULT;
2957
2958out:
2959 pte_unmap_unlock(ptep, ptl);
2960 *p_pfn = pfn;
2961
2962 return r;
2963}
2964
2965/*
2966 * Pin a guest page in memory and return its pfn.
2967 * @addr: host virtual address which maps memory to the guest
2968 * @atomic: if true, the function must not sleep and fails instead of waiting
2969 * @interruptible: whether the process can be interrupted by non-fatal signals
2970 * @async: if non-NULL, IO is not waited for; *async is set to true if the
2971 * host page is absent but could be faulted in asynchronously
2972 * @write_fault: whether a writable host page should be obtained
2973 * @writable: whether mapping a writable host page is allowed for !@write_fault
2974 *
2975 * The function will map a writable host page in these two cases:
2976 * 1) @write_fault = true
2977 * 2) @write_fault = false && @writable != NULL; here @writable tells the
2978 * caller whether the mapping is writable.
2979 */
2980kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2981 bool *async, bool write_fault, bool *writable)
2982{
2983 struct vm_area_struct *vma;
2984 kvm_pfn_t pfn;
2985 int npages, r;
2986
2987 /* we can do it either atomically or asynchronously, not both */
2988 BUG_ON(atomic && async);
2989
2990 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2991 return pfn;
2992
2993 if (atomic)
2994 return KVM_PFN_ERR_FAULT;
2995
2996 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2997 writable, &pfn);
2998 if (npages == 1)
2999 return pfn;
3000 if (npages == -EINTR)
3001 return KVM_PFN_ERR_SIGPENDING;
3002
3003 mmap_read_lock(current->mm);
3004 if (npages == -EHWPOISON ||
3005 (!async && check_user_page_hwpoison(addr))) {
3006 pfn = KVM_PFN_ERR_HWPOISON;
3007 goto exit;
3008 }
3009
3010retry:
3011 vma = vma_lookup(current->mm, addr);
3012
3013 if (vma == NULL)
3014 pfn = KVM_PFN_ERR_FAULT;
3015 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
3016 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
3017 if (r == -EAGAIN)
3018 goto retry;
3019 if (r < 0)
3020 pfn = KVM_PFN_ERR_FAULT;
3021 } else {
3022 if (async && vma_is_valid(vma, write_fault))
3023 *async = true;
3024 pfn = KVM_PFN_ERR_FAULT;
3025 }
3026exit:
3027 mmap_read_unlock(current->mm);
3028 return pfn;
3029}
3030
3031kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
3032 bool atomic, bool interruptible, bool *async,
3033 bool write_fault, bool *writable, hva_t *hva)
3034{
3035 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
3036
3037 if (hva)
3038 *hva = addr;
3039
3040 if (addr == KVM_HVA_ERR_RO_BAD) {
3041 if (writable)
3042 *writable = false;
3043 return KVM_PFN_ERR_RO_FAULT;
3044 }
3045
3046 if (kvm_is_error_hva(addr)) {
3047 if (writable)
3048 *writable = false;
3049 return KVM_PFN_NOSLOT;
3050 }
3051
3052 /* Do not map writable pfn in the readonly memslot. */
3053 if (writable && memslot_is_readonly(slot)) {
3054 *writable = false;
3055 writable = NULL;
3056 }
3057
3058 return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
3059 writable);
3060}
3061EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
3062
3063kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
3064 bool *writable)
3065{
3066 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
3067 NULL, write_fault, writable, NULL);
3068}
3069EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
3070
3071kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
3072{
3073 return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
3074 NULL, NULL);
3075}
3076EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
3077
3078kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
3079{
3080 return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
3081 NULL, NULL);
3082}
3083EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
3084
3085kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
3086{
3087 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3088}
3089EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
3090
3091kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
3092{
3093 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
3094}
3095EXPORT_SYMBOL_GPL(gfn_to_pfn);
3096
3097kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
3098{
3099 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3100}
3101EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
3102
3103int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3104 struct page **pages, int nr_pages)
3105{
3106 unsigned long addr;
3107 gfn_t entry = 0;
3108
3109 addr = gfn_to_hva_many(slot, gfn, &entry);
3110 if (kvm_is_error_hva(addr))
3111 return -1;
3112
3113 if (entry < nr_pages)
3114 return 0;
3115
3116 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3117}
3118EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
3119
3120/*
3121 * Do not use this helper unless you are absolutely certain the gfn _must_ be
3122 * backed by 'struct page'. A valid example is if the backing memslot is
3123 * controlled by KVM. Note, if the returned page is valid, its refcount has
3124 * been elevated by gfn_to_pfn().
3125 */
3126struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
3127{
3128 struct page *page;
3129 kvm_pfn_t pfn;
3130
3131 pfn = gfn_to_pfn(kvm, gfn);
3132
3133 if (is_error_noslot_pfn(pfn))
3134 return KVM_ERR_PTR_BAD_PAGE;
3135
3136 page = kvm_pfn_to_refcounted_page(pfn);
3137 if (!page)
3138 return KVM_ERR_PTR_BAD_PAGE;
3139
3140 return page;
3141}
3142EXPORT_SYMBOL_GPL(gfn_to_page);
3143
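/* Release a reference on @pfn, marking the backing page dirty first if @dirty. */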
3144void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
3145{
3146 if (dirty)
3147 kvm_release_pfn_dirty(pfn);
3148 else
3149 kvm_release_pfn_clean(pfn);
3150}
3151
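/*
 * Map a guest page into the kernel: kmap() for pfns backed by a valid
 * struct page, memremap() for IO/remapped pfns (CONFIG_HAS_IOMEM only).
 */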
3152int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
3153{
3154 kvm_pfn_t pfn;
3155 void *hva = NULL;
3156 struct page *page = KVM_UNMAPPED_PAGE;
3157
3158 if (!map)
3159 return -EINVAL;
3160
3161 pfn = gfn_to_pfn(vcpu->kvm, gfn);
3162 if (is_error_noslot_pfn(pfn))
3163 return -EINVAL;
3164
3165 if (pfn_valid(pfn)) {
3166 page = pfn_to_page(pfn);
3167 hva = kmap(page);
3168#ifdef CONFIG_HAS_IOMEM
3169 } else {
3170 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
3171#endif
3172 }
3173
3174 if (!hva)
3175 return -EFAULT;
3176
3177 map->page = page;
3178 map->hva = hva;
3179 map->pfn = pfn;
3180 map->gfn = gfn;
3181
3182 return 0;
3183}
3184EXPORT_SYMBOL_GPL(kvm_vcpu_map);
3185
3186void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
3187{
3188 if (!map)
3189 return;
3190
3191 if (!map->hva)
3192 return;
3193
3194 if (map->page != KVM_UNMAPPED_PAGE)
3195 kunmap(map->page);
3196#ifdef CONFIG_HAS_IOMEM
3197 else
3198 memunmap(map->hva);
3199#endif
3200
3201 if (dirty)
3202 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3203
3204 kvm_release_pfn(map->pfn, dirty);
3205
3206 map->hva = NULL;
3207 map->page = NULL;
3208}
3209EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
3210
3211static bool kvm_is_ad_tracked_page(struct page *page)
3212{
3213 /*
3214 * Per page-flags.h, pages tagged PG_reserved "should in general not be
3215 * touched (e.g. set dirty) except by its owner".
3216 */
3217 return !PageReserved(page);
3218}
3219
3220static void kvm_set_page_dirty(struct page *page)
3221{
3222 if (kvm_is_ad_tracked_page(page))
3223 SetPageDirty(page);
3224}
3225
3226static void kvm_set_page_accessed(struct page *page)
3227{
3228 if (kvm_is_ad_tracked_page(page))
3229 mark_page_accessed(page);
3230}
3231
3232void kvm_release_page_clean(struct page *page)
3233{
3234 WARN_ON(is_error_page(page));
3235
3236 kvm_set_page_accessed(page);
3237 put_page(page);
3238}
3239EXPORT_SYMBOL_GPL(kvm_release_page_clean);
3240
3241void kvm_release_pfn_clean(kvm_pfn_t pfn)
3242{
3243 struct page *page;
3244
3245 if (is_error_noslot_pfn(pfn))
3246 return;
3247
3248 page = kvm_pfn_to_refcounted_page(pfn);
3249 if (!page)
3250 return;
3251
3252 kvm_release_page_clean(page);
3253}
3254EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
3255
3256void kvm_release_page_dirty(struct page *page)
3257{
3258 WARN_ON(is_error_page(page));
3259
3260 kvm_set_page_dirty(page);
3261 kvm_release_page_clean(page);
3262}
3263EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
3264
3265void kvm_release_pfn_dirty(kvm_pfn_t pfn)
3266{
3267 struct page *page;
3268
3269 if (is_error_noslot_pfn(pfn))
3270 return;
3271
3272 page = kvm_pfn_to_refcounted_page(pfn);
3273 if (!page)
3274 return;
3275
3276 kvm_release_page_dirty(page);
3277}
3278EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
3279
3280/*
3281 * Note, checking for an error/noslot pfn is the caller's responsibility when
3282 * directly marking a page dirty/accessed. Unlike the "release" helpers, the
3283 * "set" helpers are not to be used when the pfn might point at garbage.
3284 */
3285void kvm_set_pfn_dirty(kvm_pfn_t pfn)
3286{
3287 if (WARN_ON(is_error_noslot_pfn(pfn)))
3288 return;
3289
3290 if (pfn_valid(pfn))
3291 kvm_set_page_dirty(pfn_to_page(pfn));
3292}
3293EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
3294
3295void kvm_set_pfn_accessed(kvm_pfn_t pfn)
3296{
3297 if (WARN_ON(is_error_noslot_pfn(pfn)))
3298 return;
3299
3300 if (pfn_valid(pfn))
3301 kvm_set_page_accessed(pfn_to_page(pfn));
3302}
3303EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
3304
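/*
 * Return the number of bytes to process in the current page: the remainder
 * of the page starting at @offset, capped at @len.
 */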
3305static int next_segment(unsigned long len, int offset)
3306{
3307 if (len > PAGE_SIZE - offset)
3308 return PAGE_SIZE - offset;
3309 else
3310 return len;
3311}
3312
3313static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3314 void *data, int offset, int len)
3315{
3316 int r;
3317 unsigned long addr;
3318
3319 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3320 if (kvm_is_error_hva(addr))
3321 return -EFAULT;
3322 r = __copy_from_user(data, (void __user *)addr + offset, len);
3323 if (r)
3324 return -EFAULT;
3325 return 0;
3326}
3327
3328int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3329 int len)
3330{
3331 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3332
3333 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3334}
3335EXPORT_SYMBOL_GPL(kvm_read_guest_page);
3336
3337int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3338 int offset, int len)
3339{
3340 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3341
3342 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3343}
3344EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
3345
3346int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3347{
3348 gfn_t gfn = gpa >> PAGE_SHIFT;
3349 int seg;
3350 int offset = offset_in_page(gpa);
3351 int ret;
3352
3353 while ((seg = next_segment(len, offset)) != 0) {
3354 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3355 if (ret < 0)
3356 return ret;
3357 offset = 0;
3358 len -= seg;
3359 data += seg;
3360 ++gfn;
3361 }
3362 return 0;
3363}
3364EXPORT_SYMBOL_GPL(kvm_read_guest);
3365
3366int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3367{
3368 gfn_t gfn = gpa >> PAGE_SHIFT;
3369 int seg;
3370 int offset = offset_in_page(gpa);
3371 int ret;
3372
3373 while ((seg = next_segment(len, offset)) != 0) {
3374 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3375 if (ret < 0)
3376 return ret;
3377 offset = 0;
3378 len -= seg;
3379 data += seg;
3380 ++gfn;
3381 }
3382 return 0;
3383}
3384EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3385
3386static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3387 void *data, int offset, unsigned long len)
3388{
3389 int r;
3390 unsigned long addr;
3391
3392 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3393 if (kvm_is_error_hva(addr))
3394 return -EFAULT;
3395 pagefault_disable();
3396 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3397 pagefault_enable();
3398 if (r)
3399 return -EFAULT;
3400 return 0;
3401}
3402
3403int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3404 void *data, unsigned long len)
3405{
3406 gfn_t gfn = gpa >> PAGE_SHIFT;
3407 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3408 int offset = offset_in_page(gpa);
3409
3410 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3411}
3412EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3413
3414static int __kvm_write_guest_page(struct kvm *kvm,
3415 struct kvm_memory_slot *memslot, gfn_t gfn,
3416 const void *data, int offset, int len)
3417{
3418 int r;
3419 unsigned long addr;
3420
3421 addr = gfn_to_hva_memslot(memslot, gfn);
3422 if (kvm_is_error_hva(addr))
3423 return -EFAULT;
3424 r = __copy_to_user((void __user *)addr + offset, data, len);
3425 if (r)
3426 return -EFAULT;
3427 mark_page_dirty_in_slot(kvm, memslot, gfn);
3428 return 0;
3429}
3430
3431int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3432 const void *data, int offset, int len)
3433{
3434 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3435
3436 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3437}
3438EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3439
3440int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3441 const void *data, int offset, int len)
3442{
3443 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3444
3445 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3446}
3447EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3448
3449int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3450 unsigned long len)
3451{
3452 gfn_t gfn = gpa >> PAGE_SHIFT;
3453 int seg;
3454 int offset = offset_in_page(gpa);
3455 int ret;
3456
3457 while ((seg = next_segment(len, offset)) != 0) {
3458 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3459 if (ret < 0)
3460 return ret;
3461 offset = 0;
3462 len -= seg;
3463 data += seg;
3464 ++gfn;
3465 }
3466 return 0;
3467}
3468EXPORT_SYMBOL_GPL(kvm_write_guest);
3469
3470int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3471 unsigned long len)
3472{
3473 gfn_t gfn = gpa >> PAGE_SHIFT;
3474 int seg;
3475 int offset = offset_in_page(gpa);
3476 int ret;
3477
3478 while ((seg = next_segment(len, offset)) != 0) {
3479 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3480 if (ret < 0)
3481 return ret;
3482 offset = 0;
3483 len -= seg;
3484 data += seg;
3485 ++gfn;
3486 }
3487 return 0;
3488}
3489EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3490
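/*
 * (Re)initialize a gfn_to_hva_cache for @gpa/@len against the current
 * memslot generation.  Single-page regions get a precomputed hva for the
 * fast path; multi-page regions fall back to the uncached slow path
 * (signalled by ghc->memslot == NULL).
 */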
3491static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3492 struct gfn_to_hva_cache *ghc,
3493 gpa_t gpa, unsigned long len)
3494{
3495 int offset = offset_in_page(gpa);
3496 gfn_t start_gfn = gpa >> PAGE_SHIFT;
3497 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3498 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3499 gfn_t nr_pages_avail;
3500
3501 /* Update ghc->generation before performing any error checks. */
3502 ghc->generation = slots->generation;
3503
3504 if (start_gfn > end_gfn) {
3505 ghc->hva = KVM_HVA_ERR_BAD;
3506 return -EINVAL;
3507 }
3508
3509 /*
3510 * If the requested region crosses two memslots, we still
3511 * verify that the entire region is valid here.
3512 */
3513 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3514 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3515 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3516 &nr_pages_avail);
3517 if (kvm_is_error_hva(ghc->hva))
3518 return -EFAULT;
3519 }
3520
3521 /* Use the slow path for cross page reads and writes. */
3522 if (nr_pages_needed == 1)
3523 ghc->hva += offset;
3524 else
3525 ghc->memslot = NULL;
3526
3527 ghc->gpa = gpa;
3528 ghc->len = len;
3529 return 0;
3530}
3531
3532int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3533 gpa_t gpa, unsigned long len)
3534{
3535 struct kvm_memslots *slots = kvm_memslots(kvm);
3536 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3537}
3538EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3539
3540int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3541 void *data, unsigned int offset,
3542 unsigned long len)
3543{
3544 struct kvm_memslots *slots = kvm_memslots(kvm);
3545 int r;
3546 gpa_t gpa = ghc->gpa + offset;
3547
3548 if (WARN_ON_ONCE(len + offset > ghc->len))
3549 return -EINVAL;
3550
3551 if (slots->generation != ghc->generation) {
3552 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3553 return -EFAULT;
3554 }
3555
3556 if (kvm_is_error_hva(ghc->hva))
3557 return -EFAULT;
3558
3559 if (unlikely(!ghc->memslot))
3560 return kvm_write_guest(kvm, gpa, data, len);
3561
3562 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3563 if (r)
3564 return -EFAULT;
3565 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3566
3567 return 0;
3568}
3569EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3570
3571int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3572 void *data, unsigned long len)
3573{
3574 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3575}
3576EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3577
3578int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3579 void *data, unsigned int offset,
3580 unsigned long len)
3581{
3582 struct kvm_memslots *slots = kvm_memslots(kvm);
3583 int r;
3584 gpa_t gpa = ghc->gpa + offset;
3585
3586 if (WARN_ON_ONCE(len + offset > ghc->len))
3587 return -EINVAL;
3588
3589 if (slots->generation != ghc->generation) {
3590 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3591 return -EFAULT;
3592 }
3593
3594 if (kvm_is_error_hva(ghc->hva))
3595 return -EFAULT;
3596
3597 if (unlikely(!ghc->memslot))
3598 return kvm_read_guest(kvm, gpa, data, len);
3599
3600 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3601 if (r)
3602 return -EFAULT;
3603
3604 return 0;
3605}
3606EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3607
3608int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3609 void *data, unsigned long len)
3610{
3611 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3612}
3613EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3614
3615int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3616{
3617 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3618 gfn_t gfn = gpa >> PAGE_SHIFT;
3619 int seg;
3620 int offset = offset_in_page(gpa);
3621 int ret;
3622
3623 while ((seg = next_segment(len, offset)) != 0) {
3624 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3625 if (ret < 0)
3626 return ret;
3627 offset = 0;
3628 len -= seg;
3629 ++gfn;
3630 }
3631 return 0;
3632}
3633EXPORT_SYMBOL_GPL(kvm_clear_guest);
3634
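/*
 * Record that @gfn in @memslot was written: push an entry onto the running
 * vCPU's dirty ring if dirty rings are enabled, otherwise set the slot's
 * dirty bitmap bit.
 */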
3635void mark_page_dirty_in_slot(struct kvm *kvm,
3636 const struct kvm_memory_slot *memslot,
3637 gfn_t gfn)
3638{
3639 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3640
3641#ifdef CONFIG_HAVE_KVM_DIRTY_RING
3642 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3643 return;
3644
3645 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3646#endif
3647
3648 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3649 unsigned long rel_gfn = gfn - memslot->base_gfn;
3650 u32 slot = (memslot->as_id << 16) | memslot->id;
3651
3652 if (kvm->dirty_ring_size && vcpu)
3653 kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3654 else if (memslot->dirty_bitmap)
3655 set_bit_le(rel_gfn, memslot->dirty_bitmap);
3656 }
3657}
3658EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3659
3660void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3661{
3662 struct kvm_memory_slot *memslot;
3663
3664 memslot = gfn_to_memslot(kvm, gfn);
3665 mark_page_dirty_in_slot(kvm, memslot, gfn);
3666}
3667EXPORT_SYMBOL_GPL(mark_page_dirty);
3668
3669void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3670{
3671 struct kvm_memory_slot *memslot;
3672
3673 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3674 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3675}
3676EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3677
3678void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3679{
3680 if (!vcpu->sigset_active)
3681 return;
3682
3683 /*
3684 * This does a lockless modification of ->real_blocked, which is fine
3685 * because only current can change ->real_blocked, and all readers of
3686 * ->real_blocked don't care as long as ->real_blocked is always a
3687 * subset of ->blocked.
3688 */
3689 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3690}
3691
3692void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3693{
3694 if (!vcpu->sigset_active)
3695 return;
3696
3697 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3698 sigemptyset(&current->real_blocked);
3699}
3700
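/*
 * Grow the halt-polling window by multiplying it by halt_poll_ns_grow,
 * bumping the result up to halt_poll_ns_grow_start if it falls below that
 * floor (which also covers the initial zero-sized window).
 */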
3701static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3702{
3703 unsigned int old, val, grow, grow_start;
3704
3705 old = val = vcpu->halt_poll_ns;
3706 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3707 grow = READ_ONCE(halt_poll_ns_grow);
3708 if (!grow)
3709 goto out;
3710
3711 val *= grow;
3712 if (val < grow_start)
3713 val = grow_start;
3714
3715 vcpu->halt_poll_ns = val;
3716out:
3717 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3718}
3719
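/*
 * Shrink the halt-polling window by dividing it by halt_poll_ns_shrink, or
 * reset it to zero if shrinking is disabled or the result would drop below
 * halt_poll_ns_grow_start.
 */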
3720static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3721{
3722 unsigned int old, val, shrink, grow_start;
3723
3724 old = val = vcpu->halt_poll_ns;
3725 shrink = READ_ONCE(halt_poll_ns_shrink);
3726 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3727 if (shrink == 0)
3728 val = 0;
3729 else
3730 val /= shrink;
3731
3732 if (val < grow_start)
3733 val = 0;
3734
3735 vcpu->halt_poll_ns = val;
3736 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3737}
3738
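/*
 * Returns 0 if the vCPU should keep blocking, -EINTR if blocking must stop:
 * the vCPU is runnable, a timer is pending, a signal is pending, or an
 * explicit unblock (KVM_REQ_UNBLOCK) was requested.
 */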
3739static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3740{
3741 int ret = -EINTR;
3742 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3743
3744 if (kvm_arch_vcpu_runnable(vcpu))
3745 goto out;
3746 if (kvm_cpu_has_pending_timer(vcpu))
3747 goto out;
3748 if (signal_pending(current))
3749 goto out;
3750 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3751 goto out;
3752
3753 ret = 0;
3754out:
3755 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3756 return ret;
3757}
3758
3759/*
3760 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3761 * pending. This is mostly used when halting a vCPU, but may also be used
3762 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3763 */
3764bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3765{
3766 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3767 bool waited = false;
3768
3769 vcpu->stat.generic.blocking = 1;
3770
3771 preempt_disable();
3772 kvm_arch_vcpu_blocking(vcpu);
3773 prepare_to_rcuwait(wait);
3774 preempt_enable();
3775
3776 for (;;) {
3777 set_current_state(TASK_INTERRUPTIBLE);
3778
3779 if (kvm_vcpu_check_block(vcpu) < 0)
3780 break;
3781
3782 waited = true;
3783 schedule();
3784 }
3785
3786 preempt_disable();
3787 finish_rcuwait(wait);
3788 kvm_arch_vcpu_unblocking(vcpu);
3789 preempt_enable();
3790
3791 vcpu->stat.generic.blocking = 0;
3792
3793 return waited;
3794}
3795
3796static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3797 ktime_t end, bool success)
3798{
3799 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3800 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3801
3802 ++vcpu->stat.generic.halt_attempted_poll;
3803
3804 if (success) {
3805 ++vcpu->stat.generic.halt_successful_poll;
3806
3807 if (!vcpu_valid_wakeup(vcpu))
3808 ++vcpu->stat.generic.halt_poll_invalid;
3809
3810 stats->halt_poll_success_ns += poll_ns;
3811 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3812 } else {
3813 stats->halt_poll_fail_ns += poll_ns;
3814 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3815 }
3816}
3817
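/*
 * Return the per-VM halt-polling cap if userspace overrode it via
 * KVM_CAP_HALT_POLL, otherwise the global halt_poll_ns module parameter.
 */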
3818static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3819{
3820 struct kvm *kvm = vcpu->kvm;
3821
3822 if (kvm->override_halt_poll_ns) {
3823 /*
3824 * Ensure kvm->max_halt_poll_ns is not read before
3825 * kvm->override_halt_poll_ns.
3826 *
3827 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3828 */
3829 smp_rmb();
3830 return READ_ONCE(kvm->max_halt_poll_ns);
3831 }
3832
3833 return READ_ONCE(halt_poll_ns);
3834}
3835
3836/*
3837 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
3838 * polling is enabled, busy wait for a short time before blocking to avoid the
3839 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3840 * is halted.
3841 */
3842void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3843{
3844 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3845 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3846 ktime_t start, cur, poll_end;
3847 bool waited = false;
3848 bool do_halt_poll;
3849 u64 halt_ns;
3850
3851 if (vcpu->halt_poll_ns > max_halt_poll_ns)
3852 vcpu->halt_poll_ns = max_halt_poll_ns;
3853
3854 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3855
3856 start = cur = poll_end = ktime_get();
3857 if (do_halt_poll) {
3858 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3859
3860 do {
3861 if (kvm_vcpu_check_block(vcpu) < 0)
3862 goto out;
3863 cpu_relax();
3864 poll_end = cur = ktime_get();
3865 } while (kvm_vcpu_can_poll(cur, stop));
3866 }
3867
3868 waited = kvm_vcpu_block(vcpu);
3869
3870 cur = ktime_get();
3871 if (waited) {
3872 vcpu->stat.generic.halt_wait_ns +=
3873 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3874 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3875 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3876 }
3877out:
3878 /* The total time the vCPU was "halted", including polling time. */
3879 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3880
3881 /*
3882 * Note, halt-polling is considered successful so long as the vCPU was
3883 * never actually scheduled out, i.e. even if the wake event arrived
3884 * after the halt-polling loop itself ended, but before the full wait.
3885 */
3886 if (do_halt_poll)
3887 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3888
3889 if (halt_poll_allowed) {
3890 /* Recompute the max halt poll time in case it changed. */
3891 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3892
3893 if (!vcpu_valid_wakeup(vcpu)) {
3894 shrink_halt_poll_ns(vcpu);
3895 } else if (max_halt_poll_ns) {
3896 if (halt_ns <= vcpu->halt_poll_ns)
3897 ;
3898 /* we had a long block, shrink polling */
3899 else if (vcpu->halt_poll_ns &&
3900 halt_ns > max_halt_poll_ns)
3901 shrink_halt_poll_ns(vcpu);
3902 /* we had a short halt and our poll time is too small */
3903 else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3904 halt_ns < max_halt_poll_ns)
3905 grow_halt_poll_ns(vcpu);
3906 } else {
3907 vcpu->halt_poll_ns = 0;
3908 }
3909 }
3910
3911 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3912}
3913EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3914
3915bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3916{
3917 if (__kvm_vcpu_wake_up(vcpu)) {
3918 WRITE_ONCE(vcpu->ready, true);
3919 ++vcpu->stat.generic.halt_wakeup;
3920 return true;
3921 }
3922
3923 return false;
3924}
3925EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3926
3927#ifndef CONFIG_S390
3928/*
3929 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3930 */
3931void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3932{
3933 int me, cpu;
3934
3935 if (kvm_vcpu_wake_up(vcpu))
3936 return;
3937
3938 me = get_cpu();
3939 /*
3940 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3941 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should
3942 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3943 * within the vCPU thread itself.
3944 */
3945 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3946 if (vcpu->mode == IN_GUEST_MODE)
3947 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3948 goto out;
3949 }
3950
3951 /*
3952 * Note, the vCPU could get migrated to a different pCPU at any point
3953 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3954 * IPI to the previous pCPU. But, that's ok because the purpose of the
3955 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3956 * vCPU also requires it to leave IN_GUEST_MODE.
3957 */
3958 if (kvm_arch_vcpu_should_kick(vcpu)) {
3959 cpu = READ_ONCE(vcpu->cpu);
3960 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3961 smp_send_reschedule(cpu);
3962 }
3963out:
3964 put_cpu();
3965}
3966EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3967#endif /* !CONFIG_S390 */
3968
3969int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3970{
3971 struct pid *pid;
3972 struct task_struct *task = NULL;
3973 int ret = 0;
3974
3975 rcu_read_lock();
3976 pid = rcu_dereference(target->pid);
3977 if (pid)
3978 task = get_pid_task(pid, PIDTYPE_PID);
3979 rcu_read_unlock();
3980 if (!task)
3981 return ret;
3982 ret = yield_to(task, 1);
3983 put_task_struct(task);
3984
3985 return ret;
3986}
3987EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3988
3989/*
3990 * Helper that checks whether a VCPU is eligible for directed yield.
3991 * The most eligible candidate to yield to is decided by the following heuristics:
3992 *
3993 * (a) A VCPU which has not done a PLE exit or had cpu-relax intercepted
3994 * recently (a preempted lock holder), indicated by @in_spin_loop.
3995 * Set at the beginning and cleared at the end of the interception/PLE handler.
3996 *
3997 * (b) A VCPU which has done a PLE exit / cpu-relax intercept but did not get
3998 * a chance last time (it has most likely become eligible now, since we
3999 * probably yielded to the lock holder in the last iteration; this is done by
4000 * toggling @dy_eligible each time a VCPU is checked for eligibility).
4001 *
4002 * Yielding to a recently PLE-exited/cpu-relax-intercepted VCPU before yielding
4003 * to a preempted lock holder could result in wrong VCPU selection and CPU
4004 * burning.  Giving priority to a potential lock holder increases lock
4005 * progress.
4006 *
4007 * Since the algorithm is based on heuristics, accessing another VCPU's data
4008 * without locking does no harm.  It may merely result in trying to yield to
4009 * the same VCPU, failing, and continuing with the next VCPU, and so on.
4010 */
4011static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
4012{
4013#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
4014 bool eligible;
4015
4016 eligible = !vcpu->spin_loop.in_spin_loop ||
4017 vcpu->spin_loop.dy_eligible;
4018
4019 if (vcpu->spin_loop.in_spin_loop)
4020 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
4021
4022 return eligible;
4023#else
4024 return true;
4025#endif
4026}
4027
4028/*
4029 * Unlike kvm_arch_vcpu_runnable, this function is called outside
4030 * a vcpu_load/vcpu_put pair. However, for most architectures
4031 * kvm_arch_vcpu_runnable does not require vcpu_load.
4032 */
4033bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
4034{
4035 return kvm_arch_vcpu_runnable(vcpu);
4036}
4037
4038static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
4039{
4040 if (kvm_arch_dy_runnable(vcpu))
4041 return true;
4042
4043#ifdef CONFIG_KVM_ASYNC_PF
4044 if (!list_empty_careful(&vcpu->async_pf.done))
4045 return true;
4046#endif
4047
4048 return false;
4049}
4050
4051bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
4052{
4053 return false;
4054}
4055
4056void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4057{
4058 struct kvm *kvm = me->kvm;
4059 struct kvm_vcpu *vcpu;
4060 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
4061 unsigned long i;
4062 int yielded = 0;
4063 int try = 3;
4064 int pass;
4065
4066 kvm_vcpu_set_in_spin_loop(me, true);
4067 /*
4068 * We boost the priority of a VCPU that is runnable but not
4069 * currently running, because it got preempted by something
4070 * else and called schedule in __vcpu_run. Hopefully that
4071 * VCPU is holding the lock that we need and will release it.
4072 * We approximate round-robin by starting at the last boosted VCPU.
4073 */
4074 for (pass = 0; pass < 2 && !yielded && try; pass++) {
4075 kvm_for_each_vcpu(i, vcpu, kvm) {
4076 if (!pass && i <= last_boosted_vcpu) {
4077 i = last_boosted_vcpu;
4078 continue;
4079 } else if (pass && i > last_boosted_vcpu)
4080 break;
4081 if (!READ_ONCE(vcpu->ready))
4082 continue;
4083 if (vcpu == me)
4084 continue;
4085 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4086 continue;
4087 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4088 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4089 !kvm_arch_vcpu_in_kernel(vcpu))
4090 continue;
4091 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4092 continue;
4093
4094 yielded = kvm_vcpu_yield_to(vcpu);
4095 if (yielded > 0) {
4096 kvm->last_boosted_vcpu = i;
4097 break;
4098 } else if (yielded < 0) {
4099 try--;
4100 if (!try)
4101 break;
4102 }
4103 }
4104 }
4105 kvm_vcpu_set_in_spin_loop(me, false);
4106
4107 /* Ensure vcpu is not eligible during next spinloop */
4108 kvm_vcpu_set_dy_eligible(me, false);
4109}
4110EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
4111
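/*
 * Check whether an mmap() page offset falls inside the vCPU's dirty-ring
 * region, i.e. within dirty_ring_size bytes of KVM_DIRTY_LOG_PAGE_OFFSET.
 */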
4112static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4113{
4114#ifdef CONFIG_HAVE_KVM_DIRTY_RING
4115 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4116 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4117 kvm->dirty_ring_size / PAGE_SIZE);
4118#else
4119 return false;
4120#endif
4121}
4122
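/*
 * Fault handler for the vCPU mmap region: hand back the kvm_run page, the
 * arch-specific PIO or coalesced-MMIO pages, or a dirty-ring page,
 * depending on the faulting offset.
 */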
4123static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4124{
4125 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4126 struct page *page;
4127
4128 if (vmf->pgoff == 0)
4129 page = virt_to_page(vcpu->run);
4130#ifdef CONFIG_X86
4131 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4132 page = virt_to_page(vcpu->arch.pio_data);
4133#endif
4134#ifdef CONFIG_KVM_MMIO
4135 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4136 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4137#endif
4138 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4139 page = kvm_dirty_ring_get_page(
4140 &vcpu->dirty_ring,
4141 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4142 else
4143 return kvm_arch_vcpu_fault(vcpu, vmf);
4144 get_page(page);
4145 vmf->page = page;
4146 return 0;
4147}
4148
4149static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4150 .fault = kvm_vcpu_fault,
4151};
4152
4153static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4154{
4155 struct kvm_vcpu *vcpu = file->private_data;
4156 unsigned long pages = vma_pages(vma);
4157
4158 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4159 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4160 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4161 return -EINVAL;
4162
4163 vma->vm_ops = &kvm_vcpu_vm_ops;
4164 return 0;
4165}
4166
4167static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4168{
4169 struct kvm_vcpu *vcpu = filp->private_data;
4170
4171 kvm_put_kvm(vcpu->kvm);
4172 return 0;
4173}
4174
4175static struct file_operations kvm_vcpu_fops = {
4176 .release = kvm_vcpu_release,
4177 .unlocked_ioctl = kvm_vcpu_ioctl,
4178 .mmap = kvm_vcpu_mmap,
4179 .llseek = noop_llseek,
4180 KVM_COMPAT(kvm_vcpu_compat_ioctl),
4181};
4182
4183/*
4184 * Allocates an inode for the vcpu.
4185 */
4186static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4187{
4188 char name[8 + 1 + ITOA_MAX_LEN + 1];
4189
4190 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4191 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4192}
4193
4194#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4195static int vcpu_get_pid(void *data, u64 *val)
4196{
4197 struct kvm_vcpu *vcpu = data;
4198
4199 rcu_read_lock();
4200 *val = pid_nr(rcu_dereference(vcpu->pid));
4201 rcu_read_unlock();
4202 return 0;
4203}
4204
4205DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4206
4207static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4208{
4209 struct dentry *debugfs_dentry;
4210 char dir_name[ITOA_MAX_LEN * 2];
4211
4212 if (!debugfs_initialized())
4213 return;
4214
4215 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4216 debugfs_dentry = debugfs_create_dir(dir_name,
4217 vcpu->kvm->debugfs_dentry);
4218 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4219 &vcpu_get_pid_fops);
4220
4221 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4222}
4223#endif
4224
4225/*
4226 * Creates some virtual cpus. Good luck creating more than one.
4227 */
4228static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
4229{
4230 int r;
4231 struct kvm_vcpu *vcpu;
4232 struct page *page;
4233
4234 if (id >= KVM_MAX_VCPU_IDS)
4235 return -EINVAL;
4236
4237 mutex_lock(&kvm->lock);
4238 if (kvm->created_vcpus >= kvm->max_vcpus) {
4239 mutex_unlock(&kvm->lock);
4240 return -EINVAL;
4241 }
4242
4243 r = kvm_arch_vcpu_precreate(kvm, id);
4244 if (r) {
4245 mutex_unlock(&kvm->lock);
4246 return r;
4247 }
4248
4249 kvm->created_vcpus++;
4250 mutex_unlock(&kvm->lock);
4251
4252 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4253 if (!vcpu) {
4254 r = -ENOMEM;
4255 goto vcpu_decrement;
4256 }
4257
4258 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4259 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4260 if (!page) {
4261 r = -ENOMEM;
4262 goto vcpu_free;
4263 }
4264 vcpu->run = page_address(page);
4265
4266 kvm_vcpu_init(vcpu, kvm, id);
4267
4268 r = kvm_arch_vcpu_create(vcpu);
4269 if (r)
4270 goto vcpu_free_run_page;
4271
4272 if (kvm->dirty_ring_size) {
4273 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
4274 id, kvm->dirty_ring_size);
4275 if (r)
4276 goto arch_vcpu_destroy;
4277 }
4278
4279 mutex_lock(&kvm->lock);
4280
4281#ifdef CONFIG_LOCKDEP
4282 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
4283 mutex_lock(&vcpu->mutex);
4284 mutex_unlock(&vcpu->mutex);
4285#endif
4286
4287 if (kvm_get_vcpu_by_id(kvm, id)) {
4288 r = -EEXIST;
4289 goto unlock_vcpu_destroy;
4290 }
4291
4292 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4293 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4294 if (r)
4295 goto unlock_vcpu_destroy;
4296
4297 /* Now it's all set up, let userspace reach it */
4298 kvm_get_kvm(kvm);
4299 r = create_vcpu_fd(vcpu);
4300 if (r < 0)
4301 goto kvm_put_xa_release;
4302
4303 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4304 r = -EINVAL;
4305 goto kvm_put_xa_release;
4306 }
4307
4308 /*
4309 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
4310 * pointer before kvm->online_vcpus is incremented.
4311 */
4312 smp_wmb();
4313 atomic_inc(&kvm->online_vcpus);
4314
4315 mutex_unlock(&kvm->lock);
4316 kvm_arch_vcpu_postcreate(vcpu);
4317 kvm_create_vcpu_debugfs(vcpu);
4318 return r;
4319
4320kvm_put_xa_release:
4321 kvm_put_kvm_no_destroy(kvm);
4322 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4323unlock_vcpu_destroy:
4324 mutex_unlock(&kvm->lock);
4325 kvm_dirty_ring_free(&vcpu->dirty_ring);
4326arch_vcpu_destroy:
4327 kvm_arch_vcpu_destroy(vcpu);
4328vcpu_free_run_page:
4329 free_page((unsigned long)vcpu->run);
4330vcpu_free:
4331 kmem_cache_free(kvm_vcpu_cache, vcpu);
4332vcpu_decrement:
4333 mutex_lock(&kvm->lock);
4334 kvm->created_vcpus--;
4335 mutex_unlock(&kvm->lock);
4336 return r;
4337}
4338
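/*
 * Install (or clear, for a NULL @sigset) the signal mask applied around
 * KVM_RUN.  SIGKILL and SIGSTOP are always removed from the mask as they
 * can never be blocked.
 */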
4339static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4340{
4341 if (sigset) {
4342 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4343 vcpu->sigset_active = 1;
4344 vcpu->sigset = *sigset;
4345 } else
4346 vcpu->sigset_active = 0;
4347 return 0;
4348}
4349
4350static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4351 size_t size, loff_t *offset)
4352{
4353 struct kvm_vcpu *vcpu = file->private_data;
4354
4355 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4356 &kvm_vcpu_stats_desc[0], &vcpu->stat,
4357 sizeof(vcpu->stat), user_buffer, size, offset);
4358}
4359
4360static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4361{
4362 struct kvm_vcpu *vcpu = file->private_data;
4363
4364 kvm_put_kvm(vcpu->kvm);
4365 return 0;
4366}
4367
4368static const struct file_operations kvm_vcpu_stats_fops = {
4369 .owner = THIS_MODULE,
4370 .read = kvm_vcpu_stats_read,
4371 .release = kvm_vcpu_stats_release,
4372 .llseek = noop_llseek,
4373};
4374
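/*
 * Create an anonymous, read-only file descriptor exposing the vCPU's
 * binary stats.  The fd holds a reference on the VM so that the stats
 * remain valid until the last user closes it.
 */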
4375static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4376{
4377 int fd;
4378 struct file *file;
4379 char name[15 + ITOA_MAX_LEN + 1];
4380
4381 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4382
4383 fd = get_unused_fd_flags(O_CLOEXEC);
4384 if (fd < 0)
4385 return fd;
4386
4387 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4388 if (IS_ERR(file)) {
4389 put_unused_fd(fd);
4390 return PTR_ERR(file);
4391 }
4392
4393 kvm_get_kvm(vcpu->kvm);
4394
4395 file->f_mode |= FMODE_PREAD;
4396 fd_install(fd, file);
4397
4398 return fd;
4399}
4400
4401static long kvm_vcpu_ioctl(struct file *filp,
4402 unsigned int ioctl, unsigned long arg)
4403{
4404 struct kvm_vcpu *vcpu = filp->private_data;
4405 void __user *argp = (void __user *)arg;
4406 int r;
4407 struct kvm_fpu *fpu = NULL;
4408 struct kvm_sregs *kvm_sregs = NULL;
4409
4410 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4411 return -EIO;
4412
4413 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4414 return -EINVAL;
4415
4416 /*
4417 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4418 * execution; mutex_lock() would break them.
4419 */
4420 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4421 if (r != -ENOIOCTLCMD)
4422 return r;
4423
4424 if (mutex_lock_killable(&vcpu->mutex))
4425 return -EINTR;
4426 switch (ioctl) {
4427 case KVM_RUN: {
4428 struct pid *oldpid;
4429 r = -EINVAL;
4430 if (arg)
4431 goto out;
4432 oldpid = rcu_access_pointer(vcpu->pid);
4433 if (unlikely(oldpid != task_pid(current))) {
4434 /* The thread running this VCPU changed. */
4435 struct pid *newpid;
4436
4437 r = kvm_arch_vcpu_run_pid_change(vcpu);
4438 if (r)
4439 break;
4440
4441 newpid = get_task_pid(current, PIDTYPE_PID);
4442 rcu_assign_pointer(vcpu->pid, newpid);
4443 if (oldpid)
4444 synchronize_rcu();
4445 put_pid(oldpid);
4446 }
4447 r = kvm_arch_vcpu_ioctl_run(vcpu);
4448 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4449 break;
4450 }
4451 case KVM_GET_REGS: {
4452 struct kvm_regs *kvm_regs;
4453
4454 r = -ENOMEM;
4455 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
4456 if (!kvm_regs)
4457 goto out;
4458 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4459 if (r)
4460 goto out_free1;
4461 r = -EFAULT;
4462 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4463 goto out_free1;
4464 r = 0;
4465out_free1:
4466 kfree(kvm_regs);
4467 break;
4468 }
4469 case KVM_SET_REGS: {
4470 struct kvm_regs *kvm_regs;
4471
4472 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4473 if (IS_ERR(kvm_regs)) {
4474 r = PTR_ERR(kvm_regs);
4475 goto out;
4476 }
4477 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4478 kfree(kvm_regs);
4479 break;
4480 }
4481 case KVM_GET_SREGS: {
4482 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
4483 GFP_KERNEL_ACCOUNT);
4484 r = -ENOMEM;
4485 if (!kvm_sregs)
4486 goto out;
4487 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4488 if (r)
4489 goto out;
4490 r = -EFAULT;
4491 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4492 goto out;
4493 r = 0;
4494 break;
4495 }
4496 case KVM_SET_SREGS: {
4497 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4498 if (IS_ERR(kvm_sregs)) {
4499 r = PTR_ERR(kvm_sregs);
4500 kvm_sregs = NULL;
4501 goto out;
4502 }
4503 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4504 break;
4505 }
4506 case KVM_GET_MP_STATE: {
4507 struct kvm_mp_state mp_state;
4508
4509 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4510 if (r)
4511 goto out;
4512 r = -EFAULT;
4513 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4514 goto out;
4515 r = 0;
4516 break;
4517 }
4518 case KVM_SET_MP_STATE: {
4519 struct kvm_mp_state mp_state;
4520
4521 r = -EFAULT;
4522 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4523 goto out;
4524 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4525 break;
4526 }
4527 case KVM_TRANSLATE: {
4528 struct kvm_translation tr;
4529
4530 r = -EFAULT;
4531 if (copy_from_user(&tr, argp, sizeof(tr)))
4532 goto out;
4533 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4534 if (r)
4535 goto out;
4536 r = -EFAULT;
4537 if (copy_to_user(argp, &tr, sizeof(tr)))
4538 goto out;
4539 r = 0;
4540 break;
4541 }
4542 case KVM_SET_GUEST_DEBUG: {
4543 struct kvm_guest_debug dbg;
4544
4545 r = -EFAULT;
4546 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4547 goto out;
4548 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4549 break;
4550 }
4551 case KVM_SET_SIGNAL_MASK: {
4552 struct kvm_signal_mask __user *sigmask_arg = argp;
4553 struct kvm_signal_mask kvm_sigmask;
4554 sigset_t sigset, *p;
4555
4556 p = NULL;
4557 if (argp) {
4558 r = -EFAULT;
4559 if (copy_from_user(&kvm_sigmask, argp,
4560 sizeof(kvm_sigmask)))
4561 goto out;
4562 r = -EINVAL;
4563 if (kvm_sigmask.len != sizeof(sigset))
4564 goto out;
4565 r = -EFAULT;
4566 if (copy_from_user(&sigset, sigmask_arg->sigset,
4567 sizeof(sigset)))
4568 goto out;
4569 p = &sigset;
4570 }
4571 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4572 break;
4573 }
4574 case KVM_GET_FPU: {
4575 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4576 r = -ENOMEM;
4577 if (!fpu)
4578 goto out;
4579 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4580 if (r)
4581 goto out;
4582 r = -EFAULT;
4583 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4584 goto out;
4585 r = 0;
4586 break;
4587 }
4588 case KVM_SET_FPU: {
4589 fpu = memdup_user(argp, sizeof(*fpu));
4590 if (IS_ERR(fpu)) {
4591 r = PTR_ERR(fpu);
4592 fpu = NULL;
4593 goto out;
4594 }
4595 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4596 break;
4597 }
4598 case KVM_GET_STATS_FD: {
4599 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4600 break;
4601 }
4602 default:
4603 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4604 }
4605out:
4606 mutex_unlock(&vcpu->mutex);
4607 kfree(fpu);
4608 kfree(kvm_sregs);
4609 return r;
4610}
4611
4612#ifdef CONFIG_KVM_COMPAT
4613static long kvm_vcpu_compat_ioctl(struct file *filp,
4614 unsigned int ioctl, unsigned long arg)
4615{
4616 struct kvm_vcpu *vcpu = filp->private_data;
4617 void __user *argp = compat_ptr(arg);
4618 int r;
4619
4620 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4621 return -EIO;
4622
4623 switch (ioctl) {
4624 case KVM_SET_SIGNAL_MASK: {
4625 struct kvm_signal_mask __user *sigmask_arg = argp;
4626 struct kvm_signal_mask kvm_sigmask;
4627 sigset_t sigset;
4628
4629 if (argp) {
4630 r = -EFAULT;
4631 if (copy_from_user(&kvm_sigmask, argp,
4632 sizeof(kvm_sigmask)))
4633 goto out;
4634 r = -EINVAL;
4635 if (kvm_sigmask.len != sizeof(compat_sigset_t))
4636 goto out;
4637 r = -EFAULT;
4638 if (get_compat_sigset(&sigset,
4639 (compat_sigset_t __user *)sigmask_arg->sigset))
4640 goto out;
4641 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4642 } else
4643 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4644 break;
4645 }
4646 default:
4647 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4648 }
4649
4650out:
4651 return r;
4652}
4653#endif
4654
4655static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4656{
4657 struct kvm_device *dev = filp->private_data;
4658
4659 if (dev->ops->mmap)
4660 return dev->ops->mmap(dev, vma);
4661
4662 return -ENODEV;
4663}
4664
4665static int kvm_device_ioctl_attr(struct kvm_device *dev,
4666 int (*accessor)(struct kvm_device *dev,
4667 struct kvm_device_attr *attr),
4668 unsigned long arg)
4669{
4670 struct kvm_device_attr attr;
4671
4672 if (!accessor)
4673 return -EPERM;
4674
4675 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4676 return -EFAULT;
4677
4678 return accessor(dev, &attr);
4679}
4680
4681static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4682 unsigned long arg)
4683{
4684 struct kvm_device *dev = filp->private_data;
4685
4686 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4687 return -EIO;
4688
4689 switch (ioctl) {
4690 case KVM_SET_DEVICE_ATTR:
4691 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4692 case KVM_GET_DEVICE_ATTR:
4693 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4694 case KVM_HAS_DEVICE_ATTR:
4695 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4696 default:
4697 if (dev->ops->ioctl)
4698 return dev->ops->ioctl(dev, ioctl, arg);
4699
4700 return -ENOTTY;
4701 }
4702}
4703
4704static int kvm_device_release(struct inode *inode, struct file *filp)
4705{
4706 struct kvm_device *dev = filp->private_data;
4707 struct kvm *kvm = dev->kvm;
4708
4709 if (dev->ops->release) {
4710 mutex_lock(&kvm->lock);
4711 list_del(&dev->vm_node);
4712 dev->ops->release(dev);
4713 mutex_unlock(&kvm->lock);
4714 }
4715
4716 kvm_put_kvm(kvm);
4717 return 0;
4718}
4719
4720static struct file_operations kvm_device_fops = {
4721 .unlocked_ioctl = kvm_device_ioctl,
4722 .release = kvm_device_release,
4723 KVM_COMPAT(kvm_device_ioctl),
4724 .mmap = kvm_device_mmap,
4725};
4726
4727struct kvm_device *kvm_device_from_filp(struct file *filp)
4728{
4729 if (filp->f_op != &kvm_device_fops)
4730 return NULL;
4731
4732 return filp->private_data;
4733}
4734
4735static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4736#ifdef CONFIG_KVM_MPIC
4737 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4738 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4739#endif
4740};
4741
4742int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4743{
4744 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4745 return -ENOSPC;
4746
4747 if (kvm_device_ops_table[type] != NULL)
4748 return -EEXIST;
4749
4750 kvm_device_ops_table[type] = ops;
4751 return 0;
4752}
4753
4754void kvm_unregister_device_ops(u32 type)
4755{
4756 if (kvm_device_ops_table[type] != NULL)
4757 kvm_device_ops_table[type] = NULL;
4758}
4759
4760static int kvm_ioctl_create_device(struct kvm *kvm,
4761 struct kvm_create_device *cd)
4762{
4763 const struct kvm_device_ops *ops;
4764 struct kvm_device *dev;
4765 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4766 int type;
4767 int ret;
4768
4769 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4770 return -ENODEV;
4771
4772 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4773 ops = kvm_device_ops_table[type];
4774 if (ops == NULL)
4775 return -ENODEV;
4776
4777 if (test)
4778 return 0;
4779
4780 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4781 if (!dev)
4782 return -ENOMEM;
4783
4784 dev->ops = ops;
4785 dev->kvm = kvm;
4786
4787 mutex_lock(&kvm->lock);
4788 ret = ops->create(dev, type);
4789 if (ret < 0) {
4790 mutex_unlock(&kvm->lock);
4791 kfree(dev);
4792 return ret;
4793 }
4794 list_add(&dev->vm_node, &kvm->devices);
4795 mutex_unlock(&kvm->lock);
4796
4797 if (ops->init)
4798 ops->init(dev);
4799
4800 kvm_get_kvm(kvm);
4801 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4802 if (ret < 0) {
4803 kvm_put_kvm_no_destroy(kvm);
4804 mutex_lock(&kvm->lock);
4805 list_del(&dev->vm_node);
4806 if (ops->release)
4807 ops->release(dev);
4808 mutex_unlock(&kvm->lock);
4809 if (ops->destroy)
4810 ops->destroy(dev);
4811 return ret;
4812 }
4813
4814 cd->fd = ret;
4815 return 0;
4816}
4817
4818static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4819{
4820 switch (arg) {
4821 case KVM_CAP_USER_MEMORY:
4822 case KVM_CAP_USER_MEMORY2:
4823 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4824 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4825 case KVM_CAP_INTERNAL_ERROR_DATA:
4826#ifdef CONFIG_HAVE_KVM_MSI
4827 case KVM_CAP_SIGNAL_MSI:
4828#endif
4829#ifdef CONFIG_HAVE_KVM_IRQCHIP
4830 case KVM_CAP_IRQFD:
4831#endif
4832 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4833 case KVM_CAP_CHECK_EXTENSION_VM:
4834 case KVM_CAP_ENABLE_CAP_VM:
4835 case KVM_CAP_HALT_POLL:
4836 return 1;
4837#ifdef CONFIG_KVM_MMIO
4838 case KVM_CAP_COALESCED_MMIO:
4839 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4840 case KVM_CAP_COALESCED_PIO:
4841 return 1;
4842#endif
4843#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4844 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4845 return KVM_DIRTY_LOG_MANUAL_CAPS;
4846#endif
4847#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4848 case KVM_CAP_IRQ_ROUTING:
4849 return KVM_MAX_IRQ_ROUTES;
4850#endif
4851#if KVM_MAX_NR_ADDRESS_SPACES > 1
4852 case KVM_CAP_MULTI_ADDRESS_SPACE:
4853 if (kvm)
4854 return kvm_arch_nr_memslot_as_ids(kvm);
4855 return KVM_MAX_NR_ADDRESS_SPACES;
4856#endif
4857 case KVM_CAP_NR_MEMSLOTS:
4858 return KVM_USER_MEM_SLOTS;
4859 case KVM_CAP_DIRTY_LOG_RING:
4860#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4861 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4862#else
4863 return 0;
4864#endif
4865 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4866#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4867 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4868#else
4869 return 0;
4870#endif
4871#ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4872 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4873#endif
4874 case KVM_CAP_BINARY_STATS_FD:
4875 case KVM_CAP_SYSTEM_EVENT_DATA:
4876 case KVM_CAP_DEVICE_CTRL:
4877 return 1;
4878#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4879 case KVM_CAP_MEMORY_ATTRIBUTES:
4880 return kvm_supported_mem_attributes(kvm);
4881#endif
4882#ifdef CONFIG_KVM_PRIVATE_MEM
4883 case KVM_CAP_GUEST_MEMFD:
4884 return !kvm || kvm_arch_has_private_mem(kvm);
4885#endif
4886 default:
4887 break;
4888 }
4889 return kvm_vm_ioctl_check_extension(kvm, arg);
4890}
4891
4892static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4893{
4894 int r;
4895
4896 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4897 return -EINVAL;
4898
4899 /* the size should be a power of 2 */
4900 if (!size || (size & (size - 1)))
4901 return -EINVAL;
4902
4903 /* The size should be big enough to hold the reserved entries, and at least a page */
4904 if (size < kvm_dirty_ring_get_rsvd_entries() *
4905 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4906 return -EINVAL;
4907
4908 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4909 sizeof(struct kvm_dirty_gfn))
4910 return -E2BIG;
4911
4912 /* We only allow the size to be set once */
4913 if (kvm->dirty_ring_size)
4914 return -EINVAL;
4915
4916 mutex_lock(&kvm->lock);
4917
4918 if (kvm->created_vcpus) {
4919 /* We don't allow changing this value after vCPUs are created */
4920 r = -EINVAL;
4921 } else {
4922 kvm->dirty_ring_size = size;
4923 r = 0;
4924 }
4925
4926 mutex_unlock(&kvm->lock);
4927 return r;
4928}
4929
4930static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4931{
4932 unsigned long i;
4933 struct kvm_vcpu *vcpu;
4934 int cleared = 0;
4935
4936 if (!kvm->dirty_ring_size)
4937 return -EINVAL;
4938
4939 mutex_lock(&kvm->slots_lock);
4940
4941 kvm_for_each_vcpu(i, vcpu, kvm)
4942 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4943
4944 mutex_unlock(&kvm->slots_lock);
4945
4946 if (cleared)
4947 kvm_flush_remote_tlbs(kvm);
4948
4949 return cleared;
4950}
4951
4952int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4953 struct kvm_enable_cap *cap)
4954{
4955 return -EINVAL;
4956}
4957
4958bool kvm_are_all_memslots_empty(struct kvm *kvm)
4959{
4960 int i;
4961
4962 lockdep_assert_held(&kvm->slots_lock);
4963
4964 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
4965 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4966 return false;
4967 }
4968
4969 return true;
4970}
4971EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
4972
4973static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4974 struct kvm_enable_cap *cap)
4975{
4976 switch (cap->cap) {
4977#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4978 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4979 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4980
4981 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4982 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4983
4984 if (cap->flags || (cap->args[0] & ~allowed_options))
4985 return -EINVAL;
4986 kvm->manual_dirty_log_protect = cap->args[0];
4987 return 0;
4988 }
4989#endif
4990 case KVM_CAP_HALT_POLL: {
4991 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4992 return -EINVAL;
4993
4994 kvm->max_halt_poll_ns = cap->args[0];
4995
4996 /*
4997 * Ensure kvm->override_halt_poll_ns does not become visible
4998 * before kvm->max_halt_poll_ns.
4999 *
5000 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5001 */
5002 smp_wmb();
5003 kvm->override_halt_poll_ns = true;
5004
5005 return 0;
5006 }
5007 case KVM_CAP_DIRTY_LOG_RING:
5008 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5009 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5010 return -EINVAL;
5011
5012 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5013 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5014 int r = -EINVAL;
5015
5016 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5017 !kvm->dirty_ring_size || cap->flags)
5018 return r;
5019
5020 mutex_lock(&kvm->slots_lock);
5021
5022 /*
5023 * For simplicity, allow enabling ring+bitmap if and only if
5024 * there are no memslots, e.g. to ensure all memslots allocate
5025 * a bitmap after the capability is enabled.
5026 */
5027 if (kvm_are_all_memslots_empty(kvm)) {
5028 kvm->dirty_ring_with_bitmap = true;
5029 r = 0;
5030 }
5031
5032 mutex_unlock(&kvm->slots_lock);
5033
5034 return r;
5035 }
5036 default:
5037 return kvm_vm_ioctl_enable_cap(kvm, cap);
5038 }
5039}

static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
                                 size_t size, loff_t *offset)
{
        struct kvm *kvm = file->private_data;

        return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
                              &kvm_vm_stats_desc[0], &kvm->stat,
                              sizeof(kvm->stat), user_buffer, size, offset);
}

static int kvm_vm_stats_release(struct inode *inode, struct file *file)
{
        struct kvm *kvm = file->private_data;

        kvm_put_kvm(kvm);
        return 0;
}

static const struct file_operations kvm_vm_stats_fops = {
        .owner = THIS_MODULE,
        .read = kvm_vm_stats_read,
        .release = kvm_vm_stats_release,
        .llseek = noop_llseek,
};

static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
{
        int fd;
        struct file *file;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        file = anon_inode_getfile("kvm-vm-stats",
                                  &kvm_vm_stats_fops, kvm, O_RDONLY);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        kvm_get_kvm(kvm);

        file->f_mode |= FMODE_PREAD;
        fd_install(fd, file);

        return fd;
}
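
/*
 * Usage sketch (userspace side, hedged): the returned fd exposes the
 * binary stats format, i.e. a struct kvm_stats_header followed by
 * descriptors and the data block.  FMODE_PREAD set above lets the data
 * block be re-read at a fixed offset without seeking:
 *
 *	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 *	// descriptors start at hdr.desc_offset, values at hdr.data_offset
 */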

#define SANITY_CHECK_MEM_REGION_FIELD(field)                                    \
do {                                                                            \
        BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=    \
                     offsetof(struct kvm_userspace_memory_region2, field));    \
        BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
                     sizeof_field(struct kvm_userspace_memory_region2, field)); \
} while (0)

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm || kvm->vm_dead)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                break;
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
                break;
        }
        case KVM_SET_USER_MEMORY_REGION2:
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region2 mem;
                unsigned long size;

                if (ioctl == KVM_SET_USER_MEMORY_REGION) {
                        /*
                         * Fields beyond struct kvm_userspace_memory_region shouldn't be
                         * accessed, but avoid leaking kernel memory in case of a bug.
                         */
                        memset(&mem, 0, sizeof(mem));
                        size = sizeof(struct kvm_userspace_memory_region);
                } else {
                        size = sizeof(struct kvm_userspace_memory_region2);
                }

                /* Ensure the common parts of the two structs are identical. */
                SANITY_CHECK_MEM_REGION_FIELD(slot);
                SANITY_CHECK_MEM_REGION_FIELD(flags);
                SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
                SANITY_CHECK_MEM_REGION_FIELD(memory_size);
                SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);

                r = -EFAULT;
                if (copy_from_user(&mem, argp, size))
                        goto out;

                r = -EINVAL;
                if (ioctl == KVM_SET_USER_MEMORY_REGION &&
                    (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof(log)))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                break;
        }
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
        case KVM_CLEAR_DIRTY_LOG: {
                struct kvm_clear_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof(log)))
                        goto out;
                r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
                break;
        }
#endif
#ifdef CONFIG_KVM_MMIO
        case KVM_REGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;

                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof(zone)))
                        goto out;
                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                break;
        }
        case KVM_UNREGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;

                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof(zone)))
                        goto out;
                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
                break;
        }
#endif
        case KVM_IRQFD: {
                struct kvm_irqfd data;

                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof(data)))
                        goto out;
                r = kvm_irqfd(kvm, &data);
                break;
        }
        case KVM_IOEVENTFD: {
                struct kvm_ioeventfd data;

                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof(data)))
                        goto out;
                r = kvm_ioeventfd(kvm, &data);
                break;
        }
#ifdef CONFIG_HAVE_KVM_MSI
        case KVM_SIGNAL_MSI: {
                struct kvm_msi msi;

                r = -EFAULT;
                if (copy_from_user(&msi, argp, sizeof(msi)))
                        goto out;
                r = kvm_send_userspace_msi(kvm, &msi);
                break;
        }
#endif
#ifdef __KVM_HAVE_IRQ_LINE
        case KVM_IRQ_LINE_STATUS:
        case KVM_IRQ_LINE: {
                struct kvm_irq_level irq_event;

                r = -EFAULT;
                if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
                        goto out;

                r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
                                          ioctl == KVM_IRQ_LINE_STATUS);
                if (r)
                        goto out;

                r = -EFAULT;
                if (ioctl == KVM_IRQ_LINE_STATUS) {
                        if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
                                goto out;
                }

                r = 0;
                break;
        }
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
                struct kvm_irq_routing_entry *entries = NULL;

                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
                if (!kvm_arch_can_set_irq_routing(kvm))
                        goto out;
                if (routing.nr > KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
                if (routing.nr) {
                        urouting = argp;
                        entries = vmemdup_array_user(urouting->entries,
                                                     routing.nr, sizeof(*entries));
                        if (IS_ERR(entries)) {
                                r = PTR_ERR(entries);
                                goto out;
                        }
                }
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
                kvfree(entries);
                break;
        }
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
        case KVM_SET_MEMORY_ATTRIBUTES: {
                struct kvm_memory_attributes attrs;

                r = -EFAULT;
                if (copy_from_user(&attrs, argp, sizeof(attrs)))
                        goto out;

                r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
                break;
        }
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
        case KVM_CREATE_DEVICE: {
                struct kvm_create_device cd;

                r = -EFAULT;
                if (copy_from_user(&cd, argp, sizeof(cd)))
                        goto out;

                r = kvm_ioctl_create_device(kvm, &cd);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(argp, &cd, sizeof(cd)))
                        goto out;

                r = 0;
                break;
        }
        case KVM_CHECK_EXTENSION:
                r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
                break;
        case KVM_RESET_DIRTY_RINGS:
                r = kvm_vm_ioctl_reset_dirty_pages(kvm);
                break;
        case KVM_GET_STATS_FD:
                r = kvm_vm_ioctl_get_stats_fd(kvm);
                break;
#ifdef CONFIG_KVM_PRIVATE_MEM
        case KVM_CREATE_GUEST_MEMFD: {
                struct kvm_create_guest_memfd guest_memfd;

                r = -EFAULT;
                if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
                        goto out;

                r = kvm_gmem_create(kvm, &guest_memfd);
                break;
        }
#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}
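
/*
 * Usage sketch (userspace side, hedged): the most common VM ioctl above
 * is KVM_SET_USER_MEMORY_REGION, which maps a host allocation into
 * guest physical address space (vm_fd and size assumed from setup):
 *
 *	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size = size,
 *		.userspace_addr = (__u64)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */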

#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
        __u32 slot;
        __u32 padding1;
        union {
                compat_uptr_t dirty_bitmap; /* one bit per page */
                __u64 padding2;
        };
};

struct compat_kvm_clear_dirty_log {
        __u32 slot;
        __u32 num_pages;
        __u64 first_page;
        union {
                compat_uptr_t dirty_bitmap; /* one bit per page */
                __u64 padding2;
        };
};

long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
                                     unsigned long arg)
{
        return -ENOTTY;
}

static long kvm_vm_compat_ioctl(struct file *filp,
                                unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        int r;

        if (kvm->mm != current->mm || kvm->vm_dead)
                return -EIO;

        r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
        if (r != -ENOTTY)
                return r;

        switch (ioctl) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
        case KVM_CLEAR_DIRTY_LOG: {
                struct compat_kvm_clear_dirty_log compat_log;
                struct kvm_clear_dirty_log log;

                if (copy_from_user(&compat_log, (void __user *)arg,
                                   sizeof(compat_log)))
                        return -EFAULT;
                log.slot = compat_log.slot;
                log.num_pages = compat_log.num_pages;
                log.first_page = compat_log.first_page;
                log.padding2 = compat_log.padding2;
                log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

                r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
                break;
        }
#endif
        case KVM_GET_DIRTY_LOG: {
                struct compat_kvm_dirty_log compat_log;
                struct kvm_dirty_log log;

                if (copy_from_user(&compat_log, (void __user *)arg,
                                   sizeof(compat_log)))
                        return -EFAULT;
                log.slot = compat_log.slot;
                log.padding1 = compat_log.padding1;
                log.padding2 = compat_log.padding2;
                log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                break;
        }
        default:
                r = kvm_vm_ioctl(filp, ioctl, arg);
        }
        return r;
}
#endif

static struct file_operations kvm_vm_fops = {
        .release = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .llseek = noop_llseek,
        KVM_COMPAT(kvm_vm_compat_ioctl),
};

bool file_is_kvm(struct file *file)
{
        return file && file->f_op == &kvm_vm_fops;
}
EXPORT_SYMBOL_GPL(file_is_kvm);

static int kvm_dev_ioctl_create_vm(unsigned long type)
{
        char fdname[ITOA_MAX_LEN + 1];
        int r, fd;
        struct kvm *kvm;
        struct file *file;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        snprintf(fdname, sizeof(fdname), "%d", fd);

        kvm = kvm_create_vm(type, fdname);
        if (IS_ERR(kvm)) {
                r = PTR_ERR(kvm);
                goto put_fd;
        }

        file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
        if (IS_ERR(file)) {
                r = PTR_ERR(file);
                goto put_kvm;
        }

        /*
         * Don't call kvm_put_kvm anymore at this point; file->f_op is
         * already set, with ->release() being kvm_vm_release().  In error
         * cases it will be called by the final fput(file) and will take
         * care of doing kvm_put_kvm(kvm).
         */
        kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);

        fd_install(fd, file);
        return fd;

put_kvm:
        kvm_put_kvm(kvm);
put_fd:
        put_unused_fd(fd);
        return r;
}
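
/*
 * Usage sketch (userspace side, hedged): VM creation starts from the
 * /dev/kvm character device registered below:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		errx(1, "API version mismatch");
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	// type 0: default VM
 *
 * The returned vm_fd is the anon inode installed above and accepts the
 * kvm_vm_fops ioctls.
 */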

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        int r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = kvm_dev_ioctl_create_vm(arg);
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef CONFIG_KVM_MMIO
                r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}
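
/*
 * Usage sketch (userspace side, hedged): KVM_GET_VCPU_MMAP_SIZE sizes
 * the shared region backing struct kvm_run (plus the optional pio and
 * coalesced-MMIO pages accounted above), which is then mmap()ed from a
 * vCPU fd (kvm_fd and vcpu_fd assumed from setup):
 *
 *	int run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */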

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .llseek = noop_llseek,
        KVM_COMPAT(kvm_dev_ioctl),
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static DEFINE_PER_CPU(bool, hardware_enabled);
static int kvm_usage_count;

static int __hardware_enable_nolock(void)
{
        if (__this_cpu_read(hardware_enabled))
                return 0;

        if (kvm_arch_hardware_enable()) {
                pr_info("kvm: enabling virtualization on CPU%d failed\n",
                        raw_smp_processor_id());
                return -EIO;
        }

        __this_cpu_write(hardware_enabled, true);
        return 0;
}

static void hardware_enable_nolock(void *failed)
{
        if (__hardware_enable_nolock())
                atomic_inc(failed);
}

static int kvm_online_cpu(unsigned int cpu)
{
        int ret = 0;

        /*
         * Abort the CPU online process if hardware virtualization cannot
         * be enabled.  Otherwise running VMs would encounter unrecoverable
         * errors when scheduled to this CPU.
         */
        mutex_lock(&kvm_lock);
        if (kvm_usage_count)
                ret = __hardware_enable_nolock();
        mutex_unlock(&kvm_lock);
        return ret;
}

static void hardware_disable_nolock(void *junk)
{
        /*
         * Note, hardware_disable_all_nolock() tells all online CPUs to disable
         * hardware, not just CPUs that successfully enabled hardware!
         */
        if (!__this_cpu_read(hardware_enabled))
                return;

        kvm_arch_hardware_disable();

        __this_cpu_write(hardware_enabled, false);
}

static int kvm_offline_cpu(unsigned int cpu)
{
        mutex_lock(&kvm_lock);
        if (kvm_usage_count)
                hardware_disable_nolock(NULL);
        mutex_unlock(&kvm_lock);
        return 0;
}

static void hardware_disable_all_nolock(void)
{
        BUG_ON(!kvm_usage_count);

        kvm_usage_count--;
        if (!kvm_usage_count)
                on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
        cpus_read_lock();
        mutex_lock(&kvm_lock);
        hardware_disable_all_nolock();
        mutex_unlock(&kvm_lock);
        cpus_read_unlock();
}

static int hardware_enable_all(void)
{
        atomic_t failed = ATOMIC_INIT(0);
        int r;

        /*
         * Do not enable hardware virtualization if the system is going down.
         * If userspace initiated a forced reboot, e.g. reboot -f, then it's
         * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
         * after kvm_reboot() is called.  Note, this relies on system_state
         * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
         * hook instead of registering a dedicated reboot notifier (the latter
         * runs before system_state is updated).
         */
        if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
            system_state == SYSTEM_RESTART)
                return -EBUSY;

        /*
         * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
         * is called, and so on_each_cpu() between them includes the CPU that
         * is being onlined.  As a result, hardware_enable_nolock() may get
         * invoked before kvm_online_cpu(), which also enables hardware if the
         * usage count is non-zero.  Disable CPU hotplug to avoid attempting to
         * enable hardware multiple times.
         */
        cpus_read_lock();
        mutex_lock(&kvm_lock);

        r = 0;

        kvm_usage_count++;
        if (kvm_usage_count == 1) {
                on_each_cpu(hardware_enable_nolock, &failed, 1);

                if (atomic_read(&failed)) {
                        hardware_disable_all_nolock();
                        r = -EBUSY;
                }
        }

        mutex_unlock(&kvm_lock);
        cpus_read_unlock();

        return r;
}

static void kvm_shutdown(void)
{
        /*
         * Disable hardware virtualization and set kvm_rebooting to indicate
         * that KVM has asynchronously disabled hardware virtualization, i.e.
         * that relevant errors and exceptions aren't entirely unexpected.
         * Some flavors of hardware virtualization need to be disabled before
         * transferring control to firmware (to perform shutdown/reboot), e.g.
         * on x86, virtualization can block INIT interrupts, which are used by
         * firmware to pull APs back under firmware control.  Note, this path
         * is used for both shutdown and reboot scenarios, i.e. neither name is
         * 100% comprehensive.
         */
        pr_info("kvm: exiting hardware virtualization\n");
        kvm_rebooting = true;
        on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static int kvm_suspend(void)
{
        /*
         * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
         * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
         * is stable.  Assert that kvm_lock is not held to ensure the system
         * isn't suspended while KVM is enabling hardware.  Hardware enabling
         * can be preempted, but the task cannot be frozen until it has dropped
         * all locks (userspace tasks are frozen via a fake signal).
         */
        lockdep_assert_not_held(&kvm_lock);
        lockdep_assert_irqs_disabled();

        if (kvm_usage_count)
                hardware_disable_nolock(NULL);
        return 0;
}

static void kvm_resume(void)
{
        lockdep_assert_not_held(&kvm_lock);
        lockdep_assert_irqs_disabled();

        if (kvm_usage_count)
                WARN_ON_ONCE(__hardware_enable_nolock());
}

static struct syscore_ops kvm_syscore_ops = {
        .suspend = kvm_suspend,
        .resume = kvm_resume,
        .shutdown = kvm_shutdown,
};
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
static int hardware_enable_all(void)
{
        return 0;
}

static void hardware_disable_all(void)
{

}
#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */

static void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
        if (dev->ops->destructor)
                dev->ops->destructor(dev);
}

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->range[i].dev;

                kvm_iodevice_destructor(pos);
        }
        kfree(bus);
}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
                                 const struct kvm_io_range *r2)
{
        gpa_t addr1 = r1->addr;
        gpa_t addr2 = r2->addr;

        if (addr1 < addr2)
                return -1;

        /* If r2->len == 0, match the exact address.  If r2->len != 0,
         * accept any overlapping write.  Any order is acceptable for
         * overlapping ranges, because kvm_io_bus_get_first_dev ensures
         * we process all of them.
         */
        if (r2->len) {
                addr1 += r1->len;
                addr2 += r2->len;
        }

        if (addr1 > addr2)
                return 1;

        return 0;
}
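
/*
 * Worked example: a registered range {.addr = 0x100, .len = 8} compares
 * equal to any search key contained within [0x100, 0x108), e.g. a
 * 4-byte access at 0x104.  A registered range with .len == 0 (as
 * ioeventfd allows) compares equal only to a key whose address is
 * exactly 0x100, regardless of the access length.
 */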

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
        return kvm_io_bus_cmp(p1, p2);
}

static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
                                    gpa_t addr, int len)
{
        struct kvm_io_range *range, key;
        int off;

        key = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        range = bsearch(&key, bus->range, bus->dev_count,
                        sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
        if (range == NULL)
                return -ENOENT;

        off = range - bus->range;

        while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
                off--;

        return off;
}

static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                              struct kvm_io_range *range, const void *val)
{
        int idx;

        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
        if (idx < 0)
                return -EOPNOTSUPP;

        while (idx < bus->dev_count &&
               kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
                                        range->len, val))
                        return idx;
                idx++;
        }

        return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;
        int r;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_write(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;

        /* First try the device referenced by cookie. */
        if ((cookie >= 0) && (cookie < bus->dev_count) &&
            (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
                if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
                                        val))
                        return cookie;

        /*
         * cookie contained garbage; fall back to search and return the
         * correct cookie value.
         */
        return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                             struct kvm_io_range *range, void *val)
{
        int idx;

        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
        if (idx < 0)
                return -EOPNOTSUPP;

        while (idx < bus->dev_count &&
               kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
                                       range->len, val))
                        return idx;
                idx++;
        }

        return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val)
{
        struct kvm_io_bus *bus;
        struct kvm_io_range range;
        int r;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
        };

        bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_read(vcpu, bus, &range, val);
        return r < 0 ? r : 0;
}

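/*
 * Device registration/unregistration below publishes a new bus array
 * via rcu_assign_pointer() and waits for SRCU readers with
 * synchronize_srcu_expedited() before freeing the old copy, so the
 * lockless kvm_io_bus_read()/kvm_io_bus_write() paths above never
 * observe a half-updated bus.
 */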
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev)
{
        int i;
        struct kvm_io_bus *new_bus, *bus;
        struct kvm_io_range range;

        lockdep_assert_held(&kvm->slots_lock);

        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
                return -ENOMEM;

        /* exclude ioeventfd which is limited by maximum fd */
        if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;

        new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
                          GFP_KERNEL_ACCOUNT);
        if (!new_bus)
                return -ENOMEM;

        range = (struct kvm_io_range) {
                .addr = addr,
                .len = len,
                .dev = dev,
        };

        for (i = 0; i < bus->dev_count; i++)
                if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
                        break;

        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
        new_bus->dev_count++;
        new_bus->range[i] = range;
        memcpy(new_bus->range + i + 1, bus->range + i,
               (bus->dev_count - i) * sizeof(struct kvm_io_range));
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);

        return 0;
}

int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev)
{
        int i;
        struct kvm_io_bus *new_bus, *bus;

        lockdep_assert_held(&kvm->slots_lock);

        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
                return 0;

        for (i = 0; i < bus->dev_count; i++) {
                if (bus->range[i].dev == dev) {
                        break;
                }
        }

        if (i == bus->dev_count)
                return 0;

        new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
                          GFP_KERNEL_ACCOUNT);
        if (new_bus) {
                memcpy(new_bus, bus, struct_size(bus, range, i));
                new_bus->dev_count--;
                memcpy(new_bus->range + i, bus->range + i + 1,
                       flex_array_size(new_bus, range, new_bus->dev_count - i));
        }

        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);

        /*
         * If NULL bus is installed, destroy the old bus, including all the
         * attached devices.  Otherwise, destroy the caller's device only.
         */
        if (!new_bus) {
                pr_err("kvm: failed to shrink bus, removing it completely\n");
                kvm_io_bus_destroy(bus);
                return -ENOMEM;
        }

        kvm_iodevice_destructor(dev);
        kfree(bus);
        return 0;
}

struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr)
{
        struct kvm_io_bus *bus;
        int dev_idx, srcu_idx;
        struct kvm_io_device *iodev = NULL;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
        if (!bus)
                goto out_unlock;

        dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
        if (dev_idx < 0)
                goto out_unlock;

        iodev = bus->range[dev_idx].dev;

out_unlock:
        srcu_read_unlock(&kvm->srcu, srcu_idx);

        return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);

static int kvm_debugfs_open(struct inode *inode, struct file *file,
                            int (*get)(void *, u64 *), int (*set)(void *, u64),
                            const char *fmt)
{
        int ret;
        struct kvm_stat_data *stat_data = inode->i_private;

        /*
         * The debugfs files are a reference to the kvm struct which
         * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
         * avoids the race between open and the removal of the debugfs directory.
         */
        if (!kvm_get_kvm_safe(stat_data->kvm))
                return -ENOENT;

        ret = simple_attr_open(inode, file, get,
                               kvm_stats_debugfs_mode(stat_data->desc) & 0222
                               ? set : NULL, fmt);
        if (ret)
                kvm_put_kvm(stat_data->kvm);

        return ret;
}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
        struct kvm_stat_data *stat_data = inode->i_private;

        simple_attr_release(inode, file);
        kvm_put_kvm(stat_data->kvm);

        return 0;
}

static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{
        *val = *(u64 *)((void *)(&kvm->stat) + offset);

        return 0;
}

static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{
        *(u64 *)((void *)(&kvm->stat) + offset) = 0;

        return 0;
}

static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        *val = 0;

        kvm_for_each_vcpu(i, vcpu, kvm)
                *val += *(u64 *)((void *)(&vcpu->stat) + offset);

        return 0;
}

static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                *(u64 *)((void *)(&vcpu->stat) + offset) = 0;

        return 0;
}

static int kvm_stat_data_get(void *data, u64 *val)
{
        int r = -EFAULT;
        struct kvm_stat_data *stat_data = data;

        switch (stat_data->kind) {
        case KVM_STAT_VM:
                r = kvm_get_stat_per_vm(stat_data->kvm,
                                        stat_data->desc->desc.offset, val);
                break;
        case KVM_STAT_VCPU:
                r = kvm_get_stat_per_vcpu(stat_data->kvm,
                                          stat_data->desc->desc.offset, val);
                break;
        }

        return r;
}

static int kvm_stat_data_clear(void *data, u64 val)
{
        int r = -EFAULT;
        struct kvm_stat_data *stat_data = data;

        if (val)
                return -EINVAL;

        switch (stat_data->kind) {
        case KVM_STAT_VM:
                r = kvm_clear_stat_per_vm(stat_data->kvm,
                                          stat_data->desc->desc.offset);
                break;
        case KVM_STAT_VCPU:
                r = kvm_clear_stat_per_vcpu(stat_data->kvm,
                                            stat_data->desc->desc.offset);
                break;
        }

        return r;
}

static int kvm_stat_data_open(struct inode *inode, struct file *file)
{
        __simple_attr_check_format("%llu\n", 0ull);
        return kvm_debugfs_open(inode, file, kvm_stat_data_get,
                                kvm_stat_data_clear, "%llu\n");
}

static const struct file_operations stat_fops_per_vm = {
        .owner = THIS_MODULE,
        .open = kvm_stat_data_open,
        .release = kvm_debugfs_release,
        .read = simple_attr_read,
        .write = simple_attr_write,
        .llseek = no_llseek,
};

static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        u64 tmp_val;

        *val = 0;
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_get_stat_per_vm(kvm, offset, &tmp_val);
                *val += tmp_val;
        }
        mutex_unlock(&kvm_lock);
        return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        if (val)
                return -EINVAL;

        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_clear_stat_per_vm(kvm, offset);
        }
        mutex_unlock(&kvm_lock);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        u64 tmp_val;

        *val = 0;
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
                *val += tmp_val;
        }
        mutex_unlock(&kvm_lock);
        return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        if (val)
                return -EINVAL;

        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_clear_stat_per_vcpu(kvm, offset);
        }
        mutex_unlock(&kvm_lock);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
                        "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");

static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
        struct kobj_uevent_env *env;
        unsigned long long created, active;

        if (!kvm_dev.this_device || !kvm)
                return;

        mutex_lock(&kvm_lock);
        if (type == KVM_EVENT_CREATE_VM) {
                kvm_createvm_count++;
                kvm_active_vms++;
        } else if (type == KVM_EVENT_DESTROY_VM) {
                kvm_active_vms--;
        }
        created = kvm_createvm_count;
        active = kvm_active_vms;
        mutex_unlock(&kvm_lock);

        env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
        if (!env)
                return;

        add_uevent_var(env, "CREATED=%llu", created);
        add_uevent_var(env, "COUNT=%llu", active);

        if (type == KVM_EVENT_CREATE_VM) {
                add_uevent_var(env, "EVENT=create");
                kvm->userspace_pid = task_pid_nr(current);
        } else if (type == KVM_EVENT_DESTROY_VM) {
                add_uevent_var(env, "EVENT=destroy");
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);

        if (!IS_ERR(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);

                if (p) {
                        tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
                        if (!IS_ERR(tmp))
                                add_uevent_var(env, "STATS_PATH=%s", tmp);
                        kfree(p);
                }
        }
        /* no need for checks, since we add at most 5 keys */
        env->envp[env->envp_idx++] = NULL;
        kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
        kfree(env);
}

static void kvm_init_debug(void)
{
        const struct file_operations *fops;
        const struct _kvm_stats_desc *pdesc;
        int i;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);

        for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
                pdesc = &kvm_vm_stats_desc[i];
                if (kvm_stats_debugfs_mode(pdesc) & 0222)
                        fops = &vm_stat_fops;
                else
                        fops = &vm_stat_readonly_fops;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm_debugfs_dir,
                                    (void *)(long)pdesc->desc.offset, fops);
        }

        for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
                pdesc = &kvm_vcpu_stats_desc[i];
                if (kvm_stats_debugfs_mode(pdesc) & 0222)
                        fops = &vcpu_stat_fops;
                else
                        fops = &vcpu_stat_readonly_fops;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm_debugfs_dir,
                                    (void *)(long)pdesc->desc.offset, fops);
        }
}

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        WRITE_ONCE(vcpu->preempted, false);
        WRITE_ONCE(vcpu->ready, false);

        __this_cpu_write(kvm_running_vcpu, vcpu);
        kvm_arch_sched_in(vcpu, cpu);
        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        if (current->on_rq) {
                WRITE_ONCE(vcpu->preempted, true);
                WRITE_ONCE(vcpu->ready, true);
        }
        kvm_arch_vcpu_put(vcpu);
        __this_cpu_write(kvm_running_vcpu, NULL);
}

/**
 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value as we update the
 * per-CPU variable in the preempt notifier handlers.
 */
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = __this_cpu_read(kvm_running_vcpu);
        preempt_enable();

        return vcpu;
}
EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
        return &kvm_running_vcpu;
}

#ifdef CONFIG_GUEST_PERF_EVENTS
static unsigned int kvm_guest_state(void)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        unsigned int state;

        if (!kvm_arch_pmi_in_guest(vcpu))
                return 0;

        state = PERF_GUEST_ACTIVE;
        if (!kvm_arch_vcpu_in_kernel(vcpu))
                state |= PERF_GUEST_USER;

        return state;
}

static unsigned long kvm_guest_get_ip(void)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
        if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
                return 0;

        return kvm_arch_vcpu_get_ip(vcpu);
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
        .state = kvm_guest_state,
        .get_ip = kvm_guest_get_ip,
        .handle_intel_pt_intr = NULL,
};

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
{
        kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
        perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
void kvm_unregister_perf_callbacks(void)
{
        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
}
#endif

int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
        int r;
        int cpu;

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
                                      kvm_online_cpu, kvm_offline_cpu);
        if (r)
                return r;

        register_syscore_ops(&kvm_syscore_ops);
#endif

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        if (!vcpu_align)
                vcpu_align = __alignof__(struct kvm_vcpu);
        kvm_vcpu_cache =
                kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
                                           SLAB_ACCOUNT,
                                           offsetof(struct kvm_vcpu, arch),
                                           offsetofend(struct kvm_vcpu, stats_id)
                                           - offsetof(struct kvm_vcpu, arch),
                                           NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto err_vcpu_cache;
        }

        for_each_possible_cpu(cpu) {
                if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
                                            GFP_KERNEL, cpu_to_node(cpu))) {
                        r = -ENOMEM;
                        goto err_cpu_kick_mask;
                }
        }

        r = kvm_irqfd_init();
        if (r)
                goto err_irqfd;

        r = kvm_async_pf_init();
        if (r)
                goto err_async_pf;

        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;
        kvm_device_fops.owner = module;

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_init_debug();

        r = kvm_vfio_ops_init();
        if (WARN_ON_ONCE(r))
                goto err_vfio;

        kvm_gmem_init(module);

        /*
         * Registration _must_ be the very last thing done, as this exposes
         * /dev/kvm to userspace, i.e. all infrastructure must be setup!
         */
        r = misc_register(&kvm_dev);
        if (r) {
                pr_err("kvm: misc device register failed\n");
                goto err_register;
        }

        return 0;

err_register:
        kvm_vfio_ops_exit();
err_vfio:
        kvm_async_pf_deinit();
err_async_pf:
        kvm_irqfd_exit();
err_irqfd:
err_cpu_kick_mask:
        for_each_possible_cpu(cpu)
                free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
        kmem_cache_destroy(kvm_vcpu_cache);
err_vcpu_cache:
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        int cpu;

        /*
         * Note, unregistering /dev/kvm doesn't strictly need to come first;
         * fops_get(), a.k.a. try_module_get(), prevents acquiring references
         * to KVM while the module is being stopped.
         */
        misc_deregister(&kvm_dev);

        debugfs_remove_recursive(kvm_debugfs_dir);
        for_each_possible_cpu(cpu)
                free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
        kmem_cache_destroy(kvm_vcpu_cache);
        kvm_vfio_ops_exit();
        kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
        kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);

struct kvm_vm_worker_thread_context {
        struct kvm *kvm;
        struct task_struct *parent;
        struct completion init_done;
        kvm_vm_thread_fn_t thread_fn;
        uintptr_t data;
        int err;
};

static int kvm_vm_worker_thread(void *context)
{
        /*
         * The init_context is allocated on the stack of the parent thread, so
         * we have to locally copy anything that is needed beyond initialization
         */
        struct kvm_vm_worker_thread_context *init_context = context;
        struct task_struct *parent;
        struct kvm *kvm = init_context->kvm;
        kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
        uintptr_t data = init_context->data;
        int err;

        err = kthread_park(current);
        /* kthread_park(current) is never supposed to return an error */
        WARN_ON(err != 0);
        if (err)
                goto init_complete;

        err = cgroup_attach_task_all(init_context->parent, current);
        if (err) {
                kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
                        __func__, err);
                goto init_complete;
        }

        set_user_nice(current, task_nice(init_context->parent));

init_complete:
        init_context->err = err;
        complete(&init_context->init_done);
        init_context = NULL;

        if (err)
                goto out;

        /* Wait to be woken up by the spawner before proceeding. */
        kthread_parkme();

        if (!kthread_should_stop())
                err = thread_fn(kvm, data);

out:
        /*
         * Move kthread back to its original cgroup to prevent it lingering in
         * the cgroup of the VM process, after the latter finishes its
         * execution.
         *
         * kthread_stop() waits on the 'exited' completion condition which is
         * set in exit_mm(), via mm_release(), in do_exit().  However, the
         * kthread is removed from the cgroup in the cgroup_exit() which is
         * called after the exit_mm().  This causes the kthread_stop() to return
         * before the kthread actually quits the cgroup.
         */
        rcu_read_lock();
        parent = rcu_dereference(current->real_parent);
        get_task_struct(parent);
        rcu_read_unlock();
        cgroup_attach_task_all(parent, current);
        put_task_struct(parent);

        return err;
}

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr)
{
        struct kvm_vm_worker_thread_context init_context = {};
        struct task_struct *thread;

        *thread_ptr = NULL;
        init_context.kvm = kvm;
        init_context.parent = current;
        init_context.thread_fn = thread_fn;
        init_context.data = data;
        init_completion(&init_context.init_done);

        thread = kthread_run(kvm_vm_worker_thread, &init_context,
                             "%s-%d", name, task_pid_nr(current));
        if (IS_ERR(thread))
                return PTR_ERR(thread);

        /* kthread_run is never supposed to return NULL */
        WARN_ON(thread == NULL);

        wait_for_completion(&init_context.init_done);

        if (!init_context.err)
                *thread_ptr = thread;

        return init_context.err;
}
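
/*
 * Usage sketch (in-kernel, hedged): a caller passes a thread function
 * and later unparks the returned task; recovery_fn and "kvm-recovery"
 * below are illustrative names, not an existing user:
 *
 *	static int recovery_fn(struct kvm *kvm, uintptr_t data);
 *
 *	r = kvm_vm_create_worker_thread(kvm, recovery_fn, 0,
 *					"kvm-recovery", &thread);
 *	if (!r)
 *		kthread_unpark(thread);
 *
 * The worker inherits the caller's cgroups and nice level, and detaches
 * from the VM's cgroups again on exit (see kvm_vm_worker_thread() above).
 */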