   1/*
   2 * Kernel-based Virtual Machine driver for Linux
   3 *
   4 * This module enables machines with Intel VT-x extensions to run virtual
   5 * machines without emulation or binary translation.
   6 *
   7 * Copyright (C) 2006 Qumranet, Inc.
   8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
   9 *
  10 * Authors:
  11 *   Avi Kivity   <avi@qumranet.com>
  12 *   Yaniv Kamay  <yaniv@qumranet.com>
  13 *
  14 * This work is licensed under the terms of the GNU GPL, version 2.  See
  15 * the COPYING file in the top-level directory.
  16 *
  17 */
  18
  19#include "iodev.h"
  20
  21#include <linux/kvm_host.h>
  22#include <linux/kvm.h>
  23#include <linux/module.h>
  24#include <linux/errno.h>
  25#include <linux/percpu.h>
  26#include <linux/mm.h>
  27#include <linux/miscdevice.h>
  28#include <linux/vmalloc.h>
  29#include <linux/reboot.h>
  30#include <linux/debugfs.h>
  31#include <linux/highmem.h>
  32#include <linux/file.h>
  33#include <linux/syscore_ops.h>
  34#include <linux/cpu.h>
  35#include <linux/sched.h>
 
 
  36#include <linux/cpumask.h>
  37#include <linux/smp.h>
  38#include <linux/anon_inodes.h>
  39#include <linux/profile.h>
  40#include <linux/kvm_para.h>
  41#include <linux/pagemap.h>
  42#include <linux/mman.h>
  43#include <linux/swap.h>
  44#include <linux/bitops.h>
  45#include <linux/spinlock.h>
  46#include <linux/compat.h>
  47#include <linux/srcu.h>
  48#include <linux/hugetlb.h>
  49#include <linux/slab.h>
  50#include <linux/sort.h>
  51#include <linux/bsearch.h>
 
 
 
 
  52
  53#include <asm/processor.h>
  54#include <asm/io.h>
  55#include <asm/uaccess.h>
  56#include <asm/pgtable.h>
  57
  58#include "coalesced_mmio.h"
  59#include "async_pf.h"
 
 
 
 
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/kvm.h>
  63
 
 
 
 
 
 
  64MODULE_AUTHOR("Qumranet");
  65MODULE_LICENSE("GPL");
  66
  67/*
  68 * Ordering of locks:
  69 *
  70 * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  71 */
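
/*
 * Illustrative sketch (not part of the original file): a path that needs
 * more than one of these locks must take them in the documented order.
 * The helper name below is hypothetical.
 */
static void example_take_locks_in_order(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);		/* outermost lock */
	mutex_lock(&kvm->slots_lock);	/* nested inside kvm->lock */
	/* ... work that needs both locks ... */
	mutex_unlock(&kvm->slots_lock);
	mutex_unlock(&kvm->lock);
}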
  72
  73DEFINE_RAW_SPINLOCK(kvm_lock);
  74LIST_HEAD(vm_list);
  75
  76static cpumask_var_t cpus_hardware_enabled;
  77static int kvm_usage_count = 0;
  78static atomic_t hardware_enable_failed;
  79
  80struct kmem_cache *kvm_vcpu_cache;
  81EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
  82
  83static __read_mostly struct preempt_ops kvm_preempt_ops;
 
  84
  85struct dentry *kvm_debugfs_dir;
 
 
 
  86
  87static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
  88			   unsigned long arg);
  89#ifdef CONFIG_COMPAT
  90static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
  91				  unsigned long arg);
  92#endif
  93static int hardware_enable_all(void);
  94static void hardware_disable_all(void);
  95
  96static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
  97
  98bool kvm_rebooting;
  99EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 
 
 100
 101static bool largepages_enabled = true;
 
 
 
 
 102
 103static struct page *hwpoison_page;
 104static pfn_t hwpoison_pfn;
 
 
 
 
 
 
 
 
 105
 106struct page *fault_page;
 107pfn_t fault_pfn;
 108
 109inline int kvm_is_mmio_pfn(pfn_t pfn)
 
 
 
 
 
 
 110{
 111	if (pfn_valid(pfn)) {
 112		int reserved;
 113		struct page *tail = pfn_to_page(pfn);
 114		struct page *head = compound_trans_head(tail);
 115		reserved = PageReserved(head);
 116		if (head != tail) {
 117			/*
 118			 * "head" is not a dangling pointer
 119			 * (compound_trans_head takes care of that)
  120			 * but the hugepage may have been split
 121			 * from under us (and we may not hold a
 122			 * reference count on the head page so it can
 123			 * be reused before we run PageReferenced), so
  124			 * we have to check PageTail before returning
 125			 * what we just read.
 126			 */
 127			smp_rmb();
 128			if (PageTail(tail))
 129				return reserved;
 130		}
 131		return PageReserved(tail);
 132	}
 133
 134	return true;
 135}
 136
 137/*
 138 * Switches to specified vcpu, until a matching vcpu_put()
 139 */
 140void vcpu_load(struct kvm_vcpu *vcpu)
 141{
 142	int cpu;
 143
 144	mutex_lock(&vcpu->mutex);
 145	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
 146		/* The thread running this VCPU changed. */
 147		struct pid *oldpid = vcpu->pid;
 148		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
 149		rcu_assign_pointer(vcpu->pid, newpid);
 150		synchronize_rcu();
 151		put_pid(oldpid);
 152	}
 153	cpu = get_cpu();
 154	preempt_notifier_register(&vcpu->preempt_notifier);
 155	kvm_arch_vcpu_load(vcpu, cpu);
 156	put_cpu();
 157}
 
 158
 159void vcpu_put(struct kvm_vcpu *vcpu)
 160{
 161	preempt_disable();
 162	kvm_arch_vcpu_put(vcpu);
 163	preempt_notifier_unregister(&vcpu->preempt_notifier);
 
 164	preempt_enable();
 165	mutex_unlock(&vcpu->mutex);
 166}
 167
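/*
 * Usage sketch (illustrative only): per-vcpu work is bracketed by
 * vcpu_load()/vcpu_put(), exactly as kvm_vcpu_ioctl() does further down
 * in this file.  The helper name and the query it performs are made up.
 */
static int example_with_vcpu_loaded(struct kvm_vcpu *vcpu)
{
	int runnable;

	vcpu_load(vcpu);	/* switch this thread to the vcpu */
	runnable = kvm_arch_vcpu_runnable(vcpu);
	vcpu_put(vcpu);		/* matching put */
	return runnable;
}
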
 168static void ack_flush(void *_completed)
 169{
 170}
 171
 172static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 173{
 174	int i, cpu, me;
 175	cpumask_var_t cpus;
 176	bool called = true;
 177	struct kvm_vcpu *vcpu;
 
 
 
 
 
 178
 179	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 180
 181	me = get_cpu();
 182	kvm_for_each_vcpu(i, vcpu, kvm) {
 183		kvm_make_request(req, vcpu);
 184		cpu = vcpu->cpu;
 185
 186		/* Set ->requests bit before we read ->mode */
 187		smp_mb();
 188
 189		if (cpus != NULL && cpu != -1 && cpu != me &&
 190		      kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
 191			cpumask_set_cpu(cpu, cpus);
 192	}
 193	if (unlikely(cpus == NULL))
 194		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
 195	else if (!cpumask_empty(cpus))
 196		smp_call_function_many(cpus, ack_flush, NULL, 1);
 197	else
 198		called = false;
 199	put_cpu();
 200	free_cpumask_var(cpus);
 201	return called;
 202}
 203
 
 
 
 
 
 
 204void kvm_flush_remote_tlbs(struct kvm *kvm)
 205{
 206	long dirty_count = kvm->tlbs_dirty;
 207
 208	smp_mb();
 209	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 210		++kvm->stat.remote_tlb_flush;
 211	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 212}
 
 213
 214void kvm_reload_remote_mmus(struct kvm *kvm)
 215{
 216	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 
 
 
 
 
 
 
 
 217}
 218
 219int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
 220{
 221	struct page *page;
 222	int r;
 223
 
 
 224	mutex_init(&vcpu->mutex);
 225	vcpu->cpu = -1;
 226	vcpu->kvm = kvm;
 227	vcpu->vcpu_id = id;
 228	vcpu->pid = NULL;
 229	init_waitqueue_head(&vcpu->wq);
 
 
 230	kvm_async_pf_vcpu_init(vcpu);
 231
 232	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 233	if (!page) {
 234		r = -ENOMEM;
 235		goto fail;
 236	}
 237	vcpu->run = page_address(page);
 238
 239	r = kvm_arch_vcpu_init(vcpu);
 240	if (r < 0)
 241		goto fail_free_run;
 242	return 0;
 243
 244fail_free_run:
 245	free_page((unsigned long)vcpu->run);
 246fail:
 247	return r;
 248}
 249EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 250
 251void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 252{
 253	put_pid(vcpu->pid);
 254	kvm_arch_vcpu_uninit(vcpu);
 255	free_page((unsigned long)vcpu->run);
 
 
 
 
 
 
 256}
 257EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
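/*
 * Illustrative sketch: an architecture's vcpu allocation typically embeds
 * struct kvm_vcpu in its own structure, initialises it with
 * kvm_vcpu_init() and tears it down with kvm_vcpu_uninit().  The struct
 * and helper below are invented for the example.
 */
struct example_arch_vcpu {
	struct kvm_vcpu vcpu;
	/* architecture-private state would follow here */
};

static int example_arch_vcpu_setup(struct example_arch_vcpu *ev,
				   struct kvm *kvm, unsigned id)
{
	int r;

	r = kvm_vcpu_init(&ev->vcpu, kvm, id);
	if (r)
		return r;
	/* on a later failure the arch code would call kvm_vcpu_uninit() */
	return 0;
}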
 258
 259#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 260static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 261{
 262	return container_of(mn, struct kvm, mmu_notifier);
 263}
 264
 265static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 266					     struct mm_struct *mm,
 267					     unsigned long address)
 268{
 269	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 270	int need_tlb_flush, idx;
 271
 
 272	/*
 273	 * When ->invalidate_page runs, the linux pte has been zapped
 274	 * already but the page is still allocated until
 275	 * ->invalidate_page returns. So if we increase the sequence
 276	 * here the kvm page fault will notice if the spte can't be
 277	 * established because the page is going to be freed. If
 278	 * instead the kvm page fault establishes the spte before
 279	 * ->invalidate_page runs, kvm_unmap_hva will release it
 280	 * before returning.
 281	 *
 282	 * The sequence increase only need to be seen at spin_unlock
 283	 * time, and not at spin_lock time.
 284	 *
 285	 * Increasing the sequence after the spin_unlock would be
 286	 * unsafe because the kvm page fault could then establish the
 287	 * pte after kvm_unmap_hva returned, without noticing the page
 288	 * is going to be freed.
 289	 */
 290	idx = srcu_read_lock(&kvm->srcu);
 291	spin_lock(&kvm->mmu_lock);
 292
 293	kvm->mmu_notifier_seq++;
 294	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
  295	/* we have to flush the TLB before the pages can be freed */
 296	if (need_tlb_flush)
 297		kvm_flush_remote_tlbs(kvm);
 298
 299	spin_unlock(&kvm->mmu_lock);
 
 
 300	srcu_read_unlock(&kvm->srcu, idx);
 301}
 302
 303static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 304					struct mm_struct *mm,
 305					unsigned long address,
 306					pte_t pte)
 307{
 308	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 309	int idx;
 310
 311	idx = srcu_read_lock(&kvm->srcu);
 312	spin_lock(&kvm->mmu_lock);
 313	kvm->mmu_notifier_seq++;
 314	kvm_set_spte_hva(kvm, address, pte);
 315	spin_unlock(&kvm->mmu_lock);
 316	srcu_read_unlock(&kvm->srcu, idx);
 
 
 
 
 
 
 
 
 
 317}
 318
 319static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 320						    struct mm_struct *mm,
 321						    unsigned long start,
 322						    unsigned long end)
 323{
 324	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 325	int need_tlb_flush = 0, idx;
 326
 327	idx = srcu_read_lock(&kvm->srcu);
 328	spin_lock(&kvm->mmu_lock);
 329	/*
 330	 * The count increase must become visible at unlock time as no
 331	 * spte can be established without taking the mmu_lock and
 332	 * count is also read inside the mmu_lock critical section.
 333	 */
 334	kvm->mmu_notifier_count++;
 335	for (; start < end; start += PAGE_SIZE)
 336		need_tlb_flush |= kvm_unmap_hva(kvm, start);
 337	need_tlb_flush |= kvm->tlbs_dirty;
  338	/* we have to flush the TLB before the pages can be freed */
 339	if (need_tlb_flush)
 340		kvm_flush_remote_tlbs(kvm);
 341
 342	spin_unlock(&kvm->mmu_lock);
 343	srcu_read_unlock(&kvm->srcu, idx);
 
 
 344}
 345
 346static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 347						  struct mm_struct *mm,
 348						  unsigned long start,
 349						  unsigned long end)
 350{
 351	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 352
 353	spin_lock(&kvm->mmu_lock);
 354	/*
 355	 * This sequence increase will notify the kvm page fault that
 356	 * the page that is going to be mapped in the spte could have
 357	 * been freed.
 358	 */
 359	kvm->mmu_notifier_seq++;
 360	smp_wmb();
 361	/*
 362	 * The above sequence increase must be visible before the
 363	 * below count decrease, which is ensured by the smp_wmb above
 364	 * in conjunction with the smp_rmb in mmu_notifier_retry().
 365	 */
 366	kvm->mmu_notifier_count--;
 367	spin_unlock(&kvm->mmu_lock);
 368
 369	BUG_ON(kvm->mmu_notifier_count < 0);
 
 
 
 
 370}
 371
 372static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 373					      struct mm_struct *mm,
 374					      unsigned long address)
 375{
 376	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 377	int young, idx;
 
 
 
 
 
 
 
 
 378
 379	idx = srcu_read_lock(&kvm->srcu);
 380	spin_lock(&kvm->mmu_lock);
 381
 382	young = kvm_age_hva(kvm, address);
 383	if (young)
 384		kvm_flush_remote_tlbs(kvm);
 
 385
 386	spin_unlock(&kvm->mmu_lock);
 387	srcu_read_unlock(&kvm->srcu, idx);
 388
 389	return young;
 390}
 391
 392static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 393				       struct mm_struct *mm,
 394				       unsigned long address)
 395{
 396	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 397	int young, idx;
 398
 399	idx = srcu_read_lock(&kvm->srcu);
 400	spin_lock(&kvm->mmu_lock);
 401	young = kvm_test_age_hva(kvm, address);
 402	spin_unlock(&kvm->mmu_lock);
 403	srcu_read_unlock(&kvm->srcu, idx);
 404
 405	return young;
 
 406}
 407
 408static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 409				     struct mm_struct *mm)
 410{
 411	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 412	int idx;
 413
 414	idx = srcu_read_lock(&kvm->srcu);
 415	kvm_arch_flush_shadow(kvm);
 416	srcu_read_unlock(&kvm->srcu, idx);
 417}
 418
 419static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 420	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
 421	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 422	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 423	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
 
 424	.test_young		= kvm_mmu_notifier_test_young,
 425	.change_pte		= kvm_mmu_notifier_change_pte,
 426	.release		= kvm_mmu_notifier_release,
 427};
 428
 429static int kvm_init_mmu_notifier(struct kvm *kvm)
 430{
 431	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
 432	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 433}
 434
 435#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
 436
 437static int kvm_init_mmu_notifier(struct kvm *kvm)
 438{
 439	return 0;
 440}
 441
 442#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 443
 444static void kvm_init_memslots_id(struct kvm *kvm)
 445{
 446	int i;
 447	struct kvm_memslots *slots = kvm->memslots;
 
 448
 449	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 450		slots->id_to_index[i] = slots->memslots[i].id = i;
 
 
 
 
 
 
 
 
 451}
 452
 453static struct kvm *kvm_create_vm(unsigned long type)
 454{
 455	int r, i;
 456	struct kvm *kvm = kvm_arch_alloc_vm();
 
 
 
 457
 458	if (!kvm)
 459		return ERR_PTR(-ENOMEM);
 460
 461	r = kvm_arch_init_vm(kvm, type);
 462	if (r)
 463		goto out_err_nodisable;
 464
 465	r = hardware_enable_all();
 466	if (r)
 467		goto out_err_nodisable;
 468
 469#ifdef CONFIG_HAVE_KVM_IRQCHIP
 470	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
 471	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 472#endif
 473
 474	r = -ENOMEM;
 475	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 476	if (!kvm->memslots)
 477		goto out_err_nosrcu;
 478	kvm_init_memslots_id(kvm);
 479	if (init_srcu_struct(&kvm->srcu))
 480		goto out_err_nosrcu;
 481	for (i = 0; i < KVM_NR_BUSES; i++) {
 482		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 483					GFP_KERNEL);
 484		if (!kvm->buses[i])
 485			goto out_err;
 486	}
 487
 488	spin_lock_init(&kvm->mmu_lock);
 489	kvm->mm = current->mm;
 490	atomic_inc(&kvm->mm->mm_count);
 491	kvm_eventfd_init(kvm);
 492	mutex_init(&kvm->lock);
 493	mutex_init(&kvm->irq_lock);
 494	mutex_init(&kvm->slots_lock);
 495	atomic_set(&kvm->users_count, 1);
 496
 497	r = kvm_init_mmu_notifier(kvm);
 
 
 
 
 498	if (r)
 499		goto out_err;
 500
 501	raw_spin_lock(&kvm_lock);
 502	list_add(&kvm->vm_list, &vm_list);
 503	raw_spin_unlock(&kvm_lock);
 
 
 
 504
 505	return kvm;
 506
 507out_err:
 508	cleanup_srcu_struct(&kvm->srcu);
 509out_err_nosrcu:
 
 
 
 
 
 
 
 510	hardware_disable_all();
 511out_err_nodisable:
 
 
 
 512	for (i = 0; i < KVM_NR_BUSES; i++)
 513		kfree(kvm->buses[i]);
 514	kfree(kvm->memslots);
 
 
 
 515	kvm_arch_free_vm(kvm);
 
 516	return ERR_PTR(r);
 517}
 518
 519static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 520{
 521	if (!memslot->dirty_bitmap)
 522		return;
 523
 524	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
 525		vfree(memslot->dirty_bitmap);
 526	else
 527		kfree(memslot->dirty_bitmap);
 528
 529	memslot->dirty_bitmap = NULL;
 530}
 531
 532/*
 533 * Free any memory in @free but not in @dont.
 534 */
 535static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 536				  struct kvm_memory_slot *dont)
 537{
 538	if (!dont || free->rmap != dont->rmap)
 539		vfree(free->rmap);
 540
 541	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 542		kvm_destroy_dirty_bitmap(free);
 543
 544	kvm_arch_free_memslot(free, dont);
 545
 546	free->npages = 0;
 547	free->rmap = NULL;
 548}
 549
 550void kvm_free_physmem(struct kvm *kvm)
 551{
 552	struct kvm_memslots *slots = kvm->memslots;
 553	struct kvm_memory_slot *memslot;
 554
 555	kvm_for_each_memslot(memslot, slots)
 556		kvm_free_physmem_slot(memslot, NULL);
 557
 558	kfree(kvm->memslots);
 559}
 560
 561static void kvm_destroy_vm(struct kvm *kvm)
 562{
 563	int i;
 564	struct mm_struct *mm = kvm->mm;
 565
 
 
 
 566	kvm_arch_sync_events(kvm);
 567	raw_spin_lock(&kvm_lock);
 568	list_del(&kvm->vm_list);
 569	raw_spin_unlock(&kvm_lock);
 
 
 570	kvm_free_irq_routing(kvm);
 571	for (i = 0; i < KVM_NR_BUSES; i++)
 572		kvm_io_bus_destroy(kvm->buses[i]);
 
 
 
 
 
 573	kvm_coalesced_mmio_free(kvm);
 574#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 575	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 576#else
 577	kvm_arch_flush_shadow(kvm);
 578#endif
 579	kvm_arch_destroy_vm(kvm);
 580	kvm_free_physmem(kvm);
 
 
 
 
 
 581	cleanup_srcu_struct(&kvm->srcu);
 
 
 
 582	kvm_arch_free_vm(kvm);
 
 583	hardware_disable_all();
 584	mmdrop(mm);
 585}
 586
 587void kvm_get_kvm(struct kvm *kvm)
 588{
 589	atomic_inc(&kvm->users_count);
 590}
 591EXPORT_SYMBOL_GPL(kvm_get_kvm);
 592
 593void kvm_put_kvm(struct kvm *kvm)
 594{
 595	if (atomic_dec_and_test(&kvm->users_count))
 596		kvm_destroy_vm(kvm);
 597}
 598EXPORT_SYMBOL_GPL(kvm_put_kvm);
 599
 600
 601static int kvm_vm_release(struct inode *inode, struct file *filp)
 602{
 603	struct kvm *kvm = filp->private_data;
 604
 605	kvm_irqfd_release(kvm);
 606
 607	kvm_put_kvm(kvm);
 608	return 0;
 609}
 610
 611/*
 612 * Allocation size is twice as large as the actual dirty bitmap size.
  613 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 614 */
 615static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 616{
 617#ifndef CONFIG_S390
 618	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
 619
 620	if (dirty_bytes > PAGE_SIZE)
 621		memslot->dirty_bitmap = vzalloc(dirty_bytes);
 622	else
 623		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
 624
 
 625	if (!memslot->dirty_bitmap)
 626		return -ENOMEM;
 627
 628#endif /* !CONFIG_S390 */
 629	return 0;
 630}
 631
 632static int cmp_memslot(const void *slot1, const void *slot2)
 633{
 634	struct kvm_memory_slot *s1, *s2;
 
 635
 636	s1 = (struct kvm_memory_slot *)slot1;
 637	s2 = (struct kvm_memory_slot *)slot2;
 638
 639	if (s1->npages < s2->npages)
 640		return 1;
 641	if (s1->npages > s2->npages)
 642		return -1;
 643
 644	return 0;
 645}
 646
 647/*
  648 * Sort the memslots based on their size, so that the larger slots
  649 * will get a better fit.
 
 
 
 
 
 650 */
 651static void sort_memslots(struct kvm_memslots *slots)
 
 
 652{
 653	int i;
 
 
 654
 655	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
 656	      sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
 
 657
 658	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 659		slots->id_to_index[slots->memslots[i].id] = i;
 
 
 
 
 
 
 
 
 
 660}
 661
 662void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
 
 
 663{
 664	if (new) {
 665		int id = new->id;
 666		struct kvm_memory_slot *old = id_to_memslot(slots, id);
 667		unsigned long npages = old->npages;
 668
 669		*old = *new;
 670		if (new->npages != npages)
 671			sort_memslots(slots);
 672	}
 673
 674	slots->generation++;
 675}
 676
 677/*
 678 * Allocate some memory and give it an address in the guest physical address
 679 * space.
 680 *
 681 * Discontiguous memory is allowed, mostly for framebuffers.
 682 *
 683 * Must be called holding mmap_sem for write.
 684 */
 685int __kvm_set_memory_region(struct kvm *kvm,
 686			    struct kvm_userspace_memory_region *mem,
 687			    int user_alloc)
 688{
 689	int r;
 690	gfn_t base_gfn;
 
 691	unsigned long npages;
 692	unsigned long i;
 693	struct kvm_memory_slot *memslot;
 694	struct kvm_memory_slot old, new;
 695	struct kvm_memslots *slots, *old_memslots;
 
 
 
 
 
 
 696
 697	r = -EINVAL;
 698	/* General sanity checks */
 699	if (mem->memory_size & (PAGE_SIZE - 1))
 700		goto out;
 
 701	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
 702		goto out;
 703	/* We can read the guest memory with __xxx_user() later on. */
 704	if (user_alloc &&
 705	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
 706	     !access_ok(VERIFY_WRITE,
 707			(void __user *)(unsigned long)mem->userspace_addr,
 708			mem->memory_size)))
 709		goto out;
 710	if (mem->slot >= KVM_MEM_SLOTS_NUM)
 711		goto out;
 
 
 
 712	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 713		goto out;
 714
 715	memslot = id_to_memslot(kvm->memslots, mem->slot);
 716	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 717	npages = mem->memory_size >> PAGE_SHIFT;
 718
 719	r = -EINVAL;
 720	if (npages > KVM_MEM_MAX_NR_PAGES)
 721		goto out;
 722
 723	if (!npages)
 724		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
 
 
 725
 726	new = old = *memslot;
 
 
 727
 728	new.id = mem->slot;
 729	new.base_gfn = base_gfn;
 730	new.npages = npages;
 731	new.flags = mem->flags;
 732
 733	/* Disallow changing a memory slot's size. */
 734	r = -EINVAL;
 735	if (npages && old.npages && npages != old.npages)
 736		goto out_free;
 737
 738	/* Check for overlaps */
 739	r = -EEXIST;
 740	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
 741		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
 742
 743		if (s == memslot || !s->npages)
 744			continue;
 745		if (!((base_gfn + npages <= s->base_gfn) ||
 746		      (base_gfn >= s->base_gfn + s->npages)))
 747			goto out_free;
 748	}
 749
 750	/* Free page dirty bitmap if unneeded */
 751	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
 752		new.dirty_bitmap = NULL;
 753
 754	r = -ENOMEM;
 755
 756	/* Allocate if a slot is being created */
 757	if (npages && !old.npages) {
 758		new.user_alloc = user_alloc;
 759		new.userspace_addr = mem->userspace_addr;
 760#ifndef CONFIG_S390
 761		new.rmap = vzalloc(npages * sizeof(*new.rmap));
 762		if (!new.rmap)
 763			goto out_free;
 764#endif /* not defined CONFIG_S390 */
 765		if (kvm_arch_create_memslot(&new, npages))
 766			goto out_free;
 767	}
 768
 769	/* Allocate page dirty bitmap if needed */
 770	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
 771		if (kvm_create_dirty_bitmap(&new) < 0)
 772			goto out_free;
 773		/* destroy any largepage mappings for dirty tracking */
 774	}
 775
 776	if (!npages) {
 777		struct kvm_memory_slot *slot;
 778
 779		r = -ENOMEM;
 780		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 781				GFP_KERNEL);
 782		if (!slots)
 783			goto out_free;
 784		slot = id_to_memslot(slots, mem->slot);
 785		slot->flags |= KVM_MEMSLOT_INVALID;
 786
 787		update_memslots(slots, NULL);
 788
 789		old_memslots = kvm->memslots;
 790		rcu_assign_pointer(kvm->memslots, slots);
 791		synchronize_srcu_expedited(&kvm->srcu);
 792		/* From this point no new shadow pages pointing to a deleted
 793		 * memslot will be created.
 794		 *
 795		 * validation of sp->gfn happens in:
 796		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
 797		 * 	- kvm_is_visible_gfn (mmu_check_roots)
 798		 */
 799		kvm_arch_flush_shadow(kvm);
 800		kfree(old_memslots);
 801	}
 802
 803	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
 804	if (r)
 805		goto out_free;
 
 
 
 
 
 806
 807	/* map/unmap the pages in iommu page table */
 808	if (npages) {
 809		r = kvm_iommu_map_pages(kvm, &new);
 
 
 
 
 
 810		if (r)
 811			goto out_free;
 812	} else
 813		kvm_iommu_unmap_pages(kvm, &old);
 814
 815	r = -ENOMEM;
 816	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 817			GFP_KERNEL);
 818	if (!slots)
 819		goto out_free;
 820
 821	/* actual memory is freed via old in kvm_free_physmem_slot below */
 822	if (!npages) {
 823		new.rmap = NULL;
 824		new.dirty_bitmap = NULL;
 825		memset(&new.arch, 0, sizeof(new.arch));
 826	}
 827
 828	update_memslots(slots, &new);
 829	old_memslots = kvm->memslots;
 830	rcu_assign_pointer(kvm->memslots, slots);
 831	synchronize_srcu_expedited(&kvm->srcu);
 832
 833	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 834
 835	/*
 836	 * If the new memory slot is created, we need to clear all
 837	 * mmio sptes.
 838	 */
 839	if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
 840		kvm_arch_flush_shadow(kvm);
 841
 842	kvm_free_physmem_slot(&old, &new);
 843	kfree(old_memslots);
 
 844
 845	return 0;
 846
 847out_free:
 848	kvm_free_physmem_slot(&new, &old);
 
 849out:
 
 850	return r;
 851
 852}
 853EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 854
 855int kvm_set_memory_region(struct kvm *kvm,
 856			  struct kvm_userspace_memory_region *mem,
 857			  int user_alloc)
 858{
 859	int r;
 860
 861	mutex_lock(&kvm->slots_lock);
 862	r = __kvm_set_memory_region(kvm, mem, user_alloc);
 863	mutex_unlock(&kvm->slots_lock);
 864	return r;
 865}
 866EXPORT_SYMBOL_GPL(kvm_set_memory_region);
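/*
 * Illustrative sketch: registering a range of host memory as guest
 * physical memory.  The slot number, guest address, size and flags are
 * made-up values; real callers receive them from userspace through the
 * KVM_SET_USER_MEMORY_REGION ioctl handled below.
 */
static int example_register_slot(struct kvm *kvm, unsigned long hva)
{
	struct kvm_userspace_memory_region mem = {
		.slot		 = 0,
		.flags		 = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = 0x100000ULL,	/* 1 MiB into guest RAM */
		.memory_size	 = 16 * PAGE_SIZE,
		.userspace_addr	 = hva,		/* page-aligned host VA */
	};

	return kvm_set_memory_region(kvm, &mem, 1 /* user_alloc */);
}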
 867
 868int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 869				   struct
 870				   kvm_userspace_memory_region *mem,
 871				   int user_alloc)
 872{
 873	if (mem->slot >= KVM_MEMORY_SLOTS)
 874		return -EINVAL;
 875	return kvm_set_memory_region(kvm, mem, user_alloc);
 
 876}
 877
 878int kvm_get_dirty_log(struct kvm *kvm,
 879			struct kvm_dirty_log *log, int *is_dirty)
 
 
 
 
 
 
 
 
 880{
 881	struct kvm_memory_slot *memslot;
 882	int r, i;
 883	unsigned long n;
 884	unsigned long any = 0;
 885
 886	r = -EINVAL;
 887	if (log->slot >= KVM_MEMORY_SLOTS)
 888		goto out;
 
 
 
 
 
 
 
 
 889
 890	memslot = id_to_memslot(kvm->memslots, log->slot);
 891	r = -ENOENT;
 892	if (!memslot->dirty_bitmap)
 893		goto out;
 894
 895	n = kvm_dirty_bitmap_bytes(memslot);
 
 
 896
 897	for (i = 0; !any && i < n/sizeof(long); ++i)
 898		any = memslot->dirty_bitmap[i];
 899
 900	r = -EFAULT;
 901	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 902		goto out;
 903
 904	if (any)
 905		*is_dirty = 1;
 906
 907	r = 0;
 908out:
 909	return r;
 910}
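/*
 * Illustrative sketch: how an architecture's KVM_GET_DIRTY_LOG handler
 * might call the generic helper above.  Real handlers (e.g. on x86) also
 * write-protect the dirty pages and clear the bitmap; this shows only the
 * locking and the call itself.
 */
static int example_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	int is_dirty = 0;
	int r;

	mutex_lock(&kvm->slots_lock);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	mutex_unlock(&kvm->slots_lock);
	return r;
}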
 
 911
 912bool kvm_largepages_enabled(void)
 913{
 914	return largepages_enabled;
 915}
 916
 917void kvm_disable_largepages(void)
 918{
 919	largepages_enabled = false;
 920}
 921EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 922
 923int is_error_page(struct page *page)
 924{
 925	return page == bad_page || page == hwpoison_page || page == fault_page;
 
 
 
 
 
 
 
 926}
 927EXPORT_SYMBOL_GPL(is_error_page);
 928
 929int is_error_pfn(pfn_t pfn)
 
 
 
 
 
 
 
 930{
 931	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
 932}
 933EXPORT_SYMBOL_GPL(is_error_pfn);
 934
 935int is_hwpoison_pfn(pfn_t pfn)
 
 936{
 937	return pfn == hwpoison_pfn;
 
 
 
 
 
 
 
 938}
 939EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
 940
 941int is_fault_pfn(pfn_t pfn)
 
 
 
 
 
 
 942{
 943	return pfn == fault_pfn;
 944}
 945EXPORT_SYMBOL_GPL(is_fault_pfn);
 946
 947int is_noslot_pfn(pfn_t pfn)
 948{
 949	return pfn == bad_pfn;
 
 
 
 950}
 951EXPORT_SYMBOL_GPL(is_noslot_pfn);
 952
 953int is_invalid_pfn(pfn_t pfn)
 
 954{
 955	return pfn == hwpoison_pfn || pfn == fault_pfn;
 956}
 957EXPORT_SYMBOL_GPL(is_invalid_pfn);
 958
 959static inline unsigned long bad_hva(void)
 
 960{
 961	return PAGE_OFFSET;
 962}
 963
 964int kvm_is_error_hva(unsigned long addr)
 965{
 966	return addr == bad_hva();
 967}
 968EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 969
 970struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 971{
 972	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
 973}
 974EXPORT_SYMBOL_GPL(gfn_to_memslot);
 975
 976int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 977{
 978	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
 
 
 979
 980	if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
 981	      memslot->flags & KVM_MEMSLOT_INVALID)
 982		return 0;
 983
 984	return 1;
 
 
 
 
 985}
 986EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 987
 988unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
 
 
 
 
 
 
 
 
 989{
 990	struct vm_area_struct *vma;
 991	unsigned long addr, size;
 992
 993	size = PAGE_SIZE;
 994
 995	addr = gfn_to_hva(kvm, gfn);
 996	if (kvm_is_error_hva(addr))
 997		return PAGE_SIZE;
 998
 999	down_read(&current->mm->mmap_sem);
1000	vma = find_vma(current->mm, addr);
1001	if (!vma)
1002		goto out;
1003
1004	size = vma_kernel_pagesize(vma);
1005
1006out:
1007	up_read(&current->mm->mmap_sem);
1008
1009	return size;
1010}
1011
1012static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1013				     gfn_t *nr_pages)
 
 
 
 
 
1014{
1015	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1016		return bad_hva();
 
 
 
1017
1018	if (nr_pages)
1019		*nr_pages = slot->npages - (gfn - slot->base_gfn);
1020
1021	return gfn_to_hva_memslot(slot, gfn);
1022}
1023
1024unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1025{
1026	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1027}
1028EXPORT_SYMBOL_GPL(gfn_to_hva);
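/*
 * Illustrative sketch: translating a guest frame number to a host virtual
 * address and bailing out on a hole in guest physical memory.  The helper
 * name is hypothetical.
 */
static int example_gfn_to_hva(struct kvm *kvm, gfn_t gfn, unsigned long *hva)
{
	unsigned long addr = gfn_to_hva(kvm, gfn);

	if (kvm_is_error_hva(addr))
		return -EFAULT;	/* gfn is not backed by any memslot */
	*hva = addr;
	return 0;
}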
1029
1030static pfn_t get_fault_pfn(void)
1031{
1032	get_page(fault_page);
1033	return fault_pfn;
1034}
 
1035
1036int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
1037	unsigned long start, int write, struct page **page)
 
 
 
 
 
 
 
 
1038{
1039	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
1040
1041	if (write)
1042		flags |= FOLL_WRITE;
 
 
 
 
 
 
 
1043
1044	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
 
 
 
 
 
 
 
1045}
1046
1047static inline int check_user_page_hwpoison(unsigned long addr)
1048{
1049	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
1050
1051	rc = __get_user_pages(current, current->mm, addr, 1,
1052			      flags, NULL, NULL, NULL);
1053	return rc == -EHWPOISON;
1054}
1055
1056static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
1057			bool *async, bool write_fault, bool *writable)
 
 
 
 
 
1058{
1059	struct page *page[1];
1060	int npages = 0;
1061	pfn_t pfn;
1062
1063	/* we can do it either atomically or asynchronously, not both */
1064	BUG_ON(atomic && async);
 
 
 
 
 
1065
1066	BUG_ON(!write_fault && !writable);
 
1067
1068	if (writable)
1069		*writable = true;
 
 
1070
1071	if (atomic || async)
1072		npages = __get_user_pages_fast(addr, 1, 1, page);
1073
1074	if (unlikely(npages != 1) && !atomic) {
1075		might_sleep();
1076
1077		if (writable)
1078			*writable = write_fault;
1079
1080		if (async) {
1081			down_read(&current->mm->mmap_sem);
1082			npages = get_user_page_nowait(current, current->mm,
1083						     addr, write_fault, page);
1084			up_read(&current->mm->mmap_sem);
1085		} else
1086			npages = get_user_pages_fast(addr, 1, write_fault,
1087						     page);
1088
1089		/* map read fault as writable if possible */
1090		if (unlikely(!write_fault) && npages == 1) {
1091			struct page *wpage[1];
1092
1093			npages = __get_user_pages_fast(addr, 1, 1, wpage);
1094			if (npages == 1) {
1095				*writable = true;
1096				put_page(page[0]);
1097				page[0] = wpage[0];
1098			}
1099			npages = 1;
 
 
 
 
 
 
 
 
1100		}
1101	}
 
 
 
1102
1103	if (unlikely(npages != 1)) {
1104		struct vm_area_struct *vma;
 
 
1105
1106		if (atomic)
1107			return get_fault_pfn();
1108
1109		down_read(&current->mm->mmap_sem);
1110		if (npages == -EHWPOISON ||
1111			(!async && check_user_page_hwpoison(addr))) {
1112			up_read(&current->mm->mmap_sem);
1113			get_page(hwpoison_page);
1114			return page_to_pfn(hwpoison_page);
1115		}
1116
1117		vma = find_vma_intersection(current->mm, addr, addr+1);
 
 
1118
1119		if (vma == NULL)
1120			pfn = get_fault_pfn();
1121		else if ((vma->vm_flags & VM_PFNMAP)) {
1122			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
1123				vma->vm_pgoff;
1124			BUG_ON(!kvm_is_mmio_pfn(pfn));
1125		} else {
1126			if (async && (vma->vm_flags & VM_WRITE))
1127				*async = true;
1128			pfn = get_fault_pfn();
1129		}
1130		up_read(&current->mm->mmap_sem);
1131	} else
1132		pfn = page_to_pfn(page[0]);
1133
1134	return pfn;
1135}
1136
1137pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
1138{
1139	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
1140}
1141EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
1142
1143static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
1144			  bool write_fault, bool *writable)
 
1145{
1146	unsigned long addr;
1147
1148	if (async)
1149		*async = false;
 
 
 
 
 
 
1150
1151	addr = gfn_to_hva(kvm, gfn);
1152	if (kvm_is_error_hva(addr)) {
1153		get_page(bad_page);
1154		return page_to_pfn(bad_page);
 
 
 
 
 
 
 
1155	}
1156
1157	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
 
1158}
 
1159
1160pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 
1161{
1162	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
 
1163}
1164EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1165
1166pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
1167		       bool write_fault, bool *writable)
1168{
1169	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
 
1170}
1171EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
1172
1173pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1174{
1175	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
 
1176}
1177EXPORT_SYMBOL_GPL(gfn_to_pfn);
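/*
 * Illustrative sketch of the usual gfn_to_pfn() lifecycle: the returned
 * pfn carries a page reference (for non-mmio pfns), so the caller must
 * drop it with kvm_release_pfn_clean()/kvm_release_pfn_dirty() when done.
 * The helper name is made up.
 */
static int example_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return -EFAULT;
	}
	/* ... use the pfn, e.g. map it into a shadow page table ... */
	kvm_release_pfn_dirty(pfn);	/* mark dirty and drop the reference */
	return 0;
}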
1178
1179pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1180		      bool *writable)
1181{
1182	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
1183}
1184EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1185
1186pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
1187			 struct kvm_memory_slot *slot, gfn_t gfn)
1188{
1189	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
1190	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
1191}
 
1192
1193int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1194								  int nr_pages)
 
 
 
 
 
 
1195{
1196	unsigned long addr;
1197	gfn_t entry;
1198
1199	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1200	if (kvm_is_error_hva(addr))
1201		return -1;
1202
1203	if (entry < nr_pages)
1204		return 0;
1205
1206	return __get_user_pages_fast(addr, nr_pages, 1, pages);
1207}
1208EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1209
 
 
 
 
 
 
1210struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1211{
1212	pfn_t pfn;
 
1213
1214	pfn = gfn_to_pfn(kvm, gfn);
1215	if (!kvm_is_mmio_pfn(pfn))
1216		return pfn_to_page(pfn);
1217
1218	WARN_ON(kvm_is_mmio_pfn(pfn));
 
1219
1220	get_page(bad_page);
1221	return bad_page;
1222}
1223
 
 
1224EXPORT_SYMBOL_GPL(gfn_to_page);
1225
1226void kvm_release_page_clean(struct page *page)
1227{
1228	kvm_release_pfn_clean(page_to_pfn(page));
 
 
 
1229}
1230EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1231
1232void kvm_release_pfn_clean(pfn_t pfn)
1233{
1234	if (!kvm_is_mmio_pfn(pfn))
1235		put_page(pfn_to_page(pfn));
 
 
 
 
 
 
 
 
1236}
1237EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1238
1239void kvm_release_page_dirty(struct page *page)
1240{
1241	kvm_release_pfn_dirty(page_to_pfn(page));
 
 
 
1242}
1243EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1244
1245void kvm_release_pfn_dirty(pfn_t pfn)
1246{
1247	kvm_set_pfn_dirty(pfn);
1248	kvm_release_pfn_clean(pfn);
 
 
 
 
 
 
 
 
1249}
1250EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1251
1252void kvm_set_page_dirty(struct page *page)
 
 
 
 
 
1253{
1254	kvm_set_pfn_dirty(page_to_pfn(page));
1255}
1256EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1257
1258void kvm_set_pfn_dirty(pfn_t pfn)
1259{
1260	if (!kvm_is_mmio_pfn(pfn)) {
1261		struct page *page = pfn_to_page(pfn);
1262		if (!PageReserved(page))
1263			SetPageDirty(page);
1264	}
1265}
1266EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1267
1268void kvm_set_pfn_accessed(pfn_t pfn)
1269{
1270	if (!kvm_is_mmio_pfn(pfn))
1271		mark_page_accessed(pfn_to_page(pfn));
1272}
1273EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1274
1275void kvm_get_pfn(pfn_t pfn)
1276{
1277	if (!kvm_is_mmio_pfn(pfn))
1278		get_page(pfn_to_page(pfn));
1279}
1280EXPORT_SYMBOL_GPL(kvm_get_pfn);
1281
1282static int next_segment(unsigned long len, int offset)
1283{
1284	if (len > PAGE_SIZE - offset)
1285		return PAGE_SIZE - offset;
1286	else
1287		return len;
1288}
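/*
 * Worked example (assuming 4 KiB pages): for a copy of len = 5000 bytes
 * starting at offset = 3072 within the first page, next_segment() returns
 * 1024 for the first chunk (the rest of that page) and 3976 for the
 * second, after which len reaches zero and the copy loops below stop.
 */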
1289
1290int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1291			int len)
1292{
1293	int r;
1294	unsigned long addr;
1295
1296	addr = gfn_to_hva(kvm, gfn);
1297	if (kvm_is_error_hva(addr))
1298		return -EFAULT;
1299	r = __copy_from_user(data, (void __user *)addr + offset, len);
1300	if (r)
1301		return -EFAULT;
1302	return 0;
1303}
 
 
 
 
 
 
 
 
1304EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1305
 
 
 
 
 
 
 
 
 
1306int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1307{
1308	gfn_t gfn = gpa >> PAGE_SHIFT;
1309	int seg;
1310	int offset = offset_in_page(gpa);
1311	int ret;
1312
1313	while ((seg = next_segment(len, offset)) != 0) {
1314		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1315		if (ret < 0)
1316			return ret;
1317		offset = 0;
1318		len -= seg;
1319		data += seg;
1320		++gfn;
1321	}
1322	return 0;
1323}
1324EXPORT_SYMBOL_GPL(kvm_read_guest);
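/*
 * Illustrative sketch: kvm_read_guest()/kvm_write_guest() hide the
 * page-crossing loop, so a caller can copy a structure that straddles a
 * page boundary in a single call.  The structure and helper are invented
 * for the example.
 */
struct example_guest_desc {
	u64 base;
	u32 limit;
};

static int example_read_desc(struct kvm *kvm, gpa_t gpa,
			     struct example_guest_desc *desc)
{
	return kvm_read_guest(kvm, gpa, desc, sizeof(*desc));
}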
1325
1326int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1327			  unsigned long len)
1328{
1329	int r;
1330	unsigned long addr;
1331	gfn_t gfn = gpa >> PAGE_SHIFT;
 
1332	int offset = offset_in_page(gpa);
1333
1334	addr = gfn_to_hva(kvm, gfn);
 
 
 
 
 
 
1335	if (kvm_is_error_hva(addr))
1336		return -EFAULT;
1337	pagefault_disable();
1338	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1339	pagefault_enable();
1340	if (r)
1341		return -EFAULT;
1342	return 0;
1343}
1344EXPORT_SYMBOL(kvm_read_guest_atomic);
1345
1346int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1347			 int offset, int len)
1348{
1349	int r;
1350	unsigned long addr;
1351
1352	addr = gfn_to_hva(kvm, gfn);
1353	if (kvm_is_error_hva(addr))
1354		return -EFAULT;
1355	r = __copy_to_user((void __user *)addr + offset, data, len);
1356	if (r)
1357		return -EFAULT;
1358	mark_page_dirty(kvm, gfn);
1359	return 0;
1360}
 
 
 
 
 
 
 
 
1361EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1362
 
 
 
 
 
 
 
 
 
1363int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1364		    unsigned long len)
1365{
1366	gfn_t gfn = gpa >> PAGE_SHIFT;
1367	int seg;
1368	int offset = offset_in_page(gpa);
1369	int ret;
1370
1371	while ((seg = next_segment(len, offset)) != 0) {
1372		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1373		if (ret < 0)
1374			return ret;
1375		offset = 0;
1376		len -= seg;
1377		data += seg;
1378		++gfn;
1379	}
1380	return 0;
1381}
 
1382
1383int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1384			      gpa_t gpa)
1385{
1386	struct kvm_memslots *slots = kvm_memslots(kvm);
1387	int offset = offset_in_page(gpa);
1388	gfn_t gfn = gpa >> PAGE_SHIFT;
 
 
 
1389
1390	ghc->gpa = gpa;
1391	ghc->generation = slots->generation;
1392	ghc->memslot = gfn_to_memslot(kvm, gfn);
1393	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1394	if (!kvm_is_error_hva(ghc->hva))
1395		ghc->hva += offset;
1396	else
1397		return -EFAULT;
1398
 
 
1399	return 0;
1400}
 
 
 
 
 
 
 
1401EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1402
1403int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1404			   void *data, unsigned long len)
 
1405{
1406	struct kvm_memslots *slots = kvm_memslots(kvm);
1407	int r;
 
 
 
 
1408
1409	if (slots->generation != ghc->generation)
1410		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
 
 
1411
1412	if (kvm_is_error_hva(ghc->hva))
1413		return -EFAULT;
1414
1415	r = __copy_to_user((void __user *)ghc->hva, data, len);
 
 
 
1416	if (r)
1417		return -EFAULT;
1418	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1419
1420	return 0;
1421}
1422EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
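/*
 * Illustrative sketch of the gfn_to_hva_cache pattern: initialise the
 * cache once for a fixed guest address, then write through it on the hot
 * path to avoid a memslot lookup per access.  The helper name and the
 * value written are made up.
 */
static int example_cached_write(struct kvm *kvm, gpa_t gpa, u64 value)
{
	struct gfn_to_hva_cache ghc;
	int r;

	r = kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
	if (r)
		return r;
	return kvm_write_guest_cached(kvm, &ghc, &value, sizeof(value));
}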
1423
1424int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1425			   void *data, unsigned long len)
1426{
 
 
 
 
 
 
 
 
1427	struct kvm_memslots *slots = kvm_memslots(kvm);
1428	int r;
 
1429
1430	if (slots->generation != ghc->generation)
1431		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
 
 
 
 
 
1432
1433	if (kvm_is_error_hva(ghc->hva))
1434		return -EFAULT;
1435
1436	r = __copy_from_user(data, (void __user *)ghc->hva, len);
 
 
 
1437	if (r)
1438		return -EFAULT;
1439
1440	return 0;
1441}
1442EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
1443
1444int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 
1445{
1446	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
1447				    offset, len);
1448}
1449EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1450
1451int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1452{
 
1453	gfn_t gfn = gpa >> PAGE_SHIFT;
1454	int seg;
1455	int offset = offset_in_page(gpa);
1456	int ret;
1457
 1458	while ((seg = next_segment(len, offset)) != 0) {
1459		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1460		if (ret < 0)
1461			return ret;
1462		offset = 0;
1463		len -= seg;
1464		++gfn;
1465	}
1466	return 0;
1467}
1468EXPORT_SYMBOL_GPL(kvm_clear_guest);
1469
1470void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
1471			     gfn_t gfn)
 
1472{
1473	if (memslot && memslot->dirty_bitmap) {
 
 
 
 
 
 
 
 
 
1474		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
1475
1476		/* TODO: introduce set_bit_le() and use it */
1477		test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
 
 
1478	}
1479}
 
1480
1481void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1482{
1483	struct kvm_memory_slot *memslot;
1484
1485	memslot = gfn_to_memslot(kvm, gfn);
1486	mark_page_dirty_in_slot(kvm, memslot, gfn);
1487}
1488
1489/*
1490 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 
 
1491 */
1492void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1493{
1494	DEFINE_WAIT(wait);
 
 
 
 
 
 
 
 
1495
1496	for (;;) {
1497		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1498
1499		if (kvm_arch_vcpu_runnable(vcpu)) {
1500			kvm_make_request(KVM_REQ_UNHALT, vcpu);
1501			break;
1502		}
1503		if (kvm_cpu_has_pending_timer(vcpu))
1504			break;
1505		if (signal_pending(current))
1506			break;
1507
 
1508		schedule();
1509	}
1510
1511	finish_wait(&vcpu->wq, &wait);
1512}
 
1513
1514#ifndef CONFIG_S390
1515/*
1516 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
1517 */
1518void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1519{
1520	int me;
1521	int cpu = vcpu->cpu;
1522	wait_queue_head_t *wqp;
1523
1524	wqp = kvm_arch_vcpu_wq(vcpu);
1525	if (waitqueue_active(wqp)) {
1526		wake_up_interruptible(wqp);
1527		++vcpu->stat.halt_wakeup;
1528	}
1529
1530	me = get_cpu();
1531	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
1532		if (kvm_arch_vcpu_should_kick(vcpu))
1533			smp_send_reschedule(cpu);
 
 
1534	put_cpu();
1535}
 
1536#endif /* !CONFIG_S390 */
1537
1538void kvm_resched(struct kvm_vcpu *vcpu)
1539{
1540	if (!need_resched())
1541		return;
1542	cond_resched();
1543}
1544EXPORT_SYMBOL_GPL(kvm_resched);
1545
1546bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
1547{
1548	struct pid *pid;
1549	struct task_struct *task = NULL;
 
1550
1551	rcu_read_lock();
1552	pid = rcu_dereference(target->pid);
1553	if (pid)
1554		task = get_pid_task(target->pid, PIDTYPE_PID);
1555	rcu_read_unlock();
1556	if (!task)
1557		return false;
1558	if (task->flags & PF_VCPU) {
1559		put_task_struct(task);
1560		return false;
1561	}
1562	if (yield_to(task, 1)) {
1563		put_task_struct(task);
1564		return true;
1565	}
1566	put_task_struct(task);
1567	return false;
 
1568}
1569EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
1570
1571void kvm_vcpu_on_spin(struct kvm_vcpu *me)
1572{
1573	struct kvm *kvm = me->kvm;
1574	struct kvm_vcpu *vcpu;
1575	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
 
1576	int yielded = 0;
 
1577	int pass;
1578	int i;
1579
 
1580	/*
1581	 * We boost the priority of a VCPU that is runnable but not
1582	 * currently running, because it got preempted by something
1583	 * else and called schedule in __vcpu_run.  Hopefully that
1584	 * VCPU is holding the lock that we need and will release it.
1585	 * We approximate round-robin by starting at the last boosted VCPU.
1586	 */
1587	for (pass = 0; pass < 2 && !yielded; pass++) {
1588		kvm_for_each_vcpu(i, vcpu, kvm) {
1589			if (!pass && i < last_boosted_vcpu) {
1590				i = last_boosted_vcpu;
1591				continue;
1592			} else if (pass && i > last_boosted_vcpu)
1593				break;
 
 
1594			if (vcpu == me)
1595				continue;
1596			if (waitqueue_active(&vcpu->wq))
 
 
 
 
1597				continue;
1598			if (kvm_vcpu_yield_to(vcpu)) {
 
 
 
 
1599				kvm->last_boosted_vcpu = i;
1600				yielded = 1;
1601				break;
 
 
 
 
1602			}
1603		}
1604	}
 
 
 
 
1605}
1606EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1607
1608static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1609{
1610	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1611	struct page *page;
1612
1613	if (vmf->pgoff == 0)
1614		page = virt_to_page(vcpu->run);
1615#ifdef CONFIG_X86
1616	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1617		page = virt_to_page(vcpu->arch.pio_data);
1618#endif
1619#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1620	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1621		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1622#endif
 
 
 
 
1623	else
1624		return kvm_arch_vcpu_fault(vcpu, vmf);
1625	get_page(page);
1626	vmf->page = page;
1627	return 0;
1628}
1629
1630static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1631	.fault = kvm_vcpu_fault,
1632};
1633
1634static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1635{
 
 
 
 
 
 
 
 
1636	vma->vm_ops = &kvm_vcpu_vm_ops;
1637	return 0;
1638}
1639
1640static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1641{
1642	struct kvm_vcpu *vcpu = filp->private_data;
1643
1644	kvm_put_kvm(vcpu->kvm);
1645	return 0;
1646}
1647
1648static struct file_operations kvm_vcpu_fops = {
1649	.release        = kvm_vcpu_release,
1650	.unlocked_ioctl = kvm_vcpu_ioctl,
1651#ifdef CONFIG_COMPAT
1652	.compat_ioctl   = kvm_vcpu_compat_ioctl,
1653#endif
1654	.mmap           = kvm_vcpu_mmap,
1655	.llseek		= noop_llseek,
 
1656};
1657
1658/*
1659 * Allocates an inode for the vcpu.
1660 */
1661static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1662{
1663	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
1664}
1665
1666/*
1667 * Creates some virtual cpus.  Good luck creating more than one.
1668 */
1669static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1670{
1671	int r;
1672	struct kvm_vcpu *vcpu, *v;
 
1673
1674	vcpu = kvm_arch_vcpu_create(kvm, id);
1675	if (IS_ERR(vcpu))
1676		return PTR_ERR(vcpu);
1677
1678	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1679
1680	r = kvm_arch_vcpu_setup(vcpu);
1681	if (r)
1682		goto vcpu_destroy;
1683
1684	mutex_lock(&kvm->lock);
1685	if (!kvm_vcpu_compatible(vcpu)) {
1686		r = -EINVAL;
1687		goto unlock_vcpu_destroy;
 
1688	}
1689	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1690		r = -EINVAL;
 
 
 
 
 
 
 
 
 
1691		goto unlock_vcpu_destroy;
1692	}
1693
1694	kvm_for_each_vcpu(r, v, kvm)
1695		if (v->vcpu_id == id) {
1696			r = -EEXIST;
1697			goto unlock_vcpu_destroy;
1698		}
1699
1700	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1701
1702	/* Now it's all set up, let userspace reach it */
1703	kvm_get_kvm(kvm);
1704	r = create_vcpu_fd(vcpu);
1705	if (r < 0) {
1706		kvm_put_kvm(kvm);
1707		goto unlock_vcpu_destroy;
 
 
 
1708	}
1709
1710	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
 
 
 
1711	smp_wmb();
1712	atomic_inc(&kvm->online_vcpus);
1713
1714	mutex_unlock(&kvm->lock);
 
 
1715	return r;
1716
 
 
 
1717unlock_vcpu_destroy:
1718	mutex_unlock(&kvm->lock);
1719vcpu_destroy:
 
1720	kvm_arch_vcpu_destroy(vcpu);
 
 
 
 
 
 
 
 
1721	return r;
1722}
1723
1724static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1725{
1726	if (sigset) {
1727		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1728		vcpu->sigset_active = 1;
1729		vcpu->sigset = *sigset;
1730	} else
1731		vcpu->sigset_active = 0;
1732	return 0;
1733}
1734
1735static long kvm_vcpu_ioctl(struct file *filp,
1736			   unsigned int ioctl, unsigned long arg)
1737{
1738	struct kvm_vcpu *vcpu = filp->private_data;
1739	void __user *argp = (void __user *)arg;
1740	int r;
1741	struct kvm_fpu *fpu = NULL;
1742	struct kvm_sregs *kvm_sregs = NULL;
1743
1744	if (vcpu->kvm->mm != current->mm)
1745		return -EIO;
1746
1747#if defined(CONFIG_S390) || defined(CONFIG_PPC)
 
 
1748	/*
1749	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1750	 * so vcpu_load() would break it.
1751	 */
1752	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
1753		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1754#endif
1755
1756
1757	vcpu_load(vcpu);
 
1758	switch (ioctl) {
1759	case KVM_RUN:
 
1760		r = -EINVAL;
1761		if (arg)
1762			goto out;
1763		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1764		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
1765		break;
 
1766	case KVM_GET_REGS: {
1767		struct kvm_regs *kvm_regs;
1768
1769		r = -ENOMEM;
1770		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1771		if (!kvm_regs)
1772			goto out;
1773		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1774		if (r)
1775			goto out_free1;
1776		r = -EFAULT;
1777		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1778			goto out_free1;
1779		r = 0;
1780out_free1:
1781		kfree(kvm_regs);
1782		break;
1783	}
1784	case KVM_SET_REGS: {
1785		struct kvm_regs *kvm_regs;
1786
1787		r = -ENOMEM;
1788		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
1789		if (IS_ERR(kvm_regs)) {
1790			r = PTR_ERR(kvm_regs);
1791			goto out;
1792		}
1793		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1794		if (r)
1795			goto out_free2;
1796		r = 0;
1797out_free2:
1798		kfree(kvm_regs);
1799		break;
1800	}
1801	case KVM_GET_SREGS: {
1802		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
 
1803		r = -ENOMEM;
1804		if (!kvm_sregs)
1805			goto out;
1806		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1807		if (r)
1808			goto out;
1809		r = -EFAULT;
1810		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1811			goto out;
1812		r = 0;
1813		break;
1814	}
1815	case KVM_SET_SREGS: {
1816		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
1817		if (IS_ERR(kvm_sregs)) {
1818			r = PTR_ERR(kvm_sregs);
 
1819			goto out;
1820		}
1821		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1822		if (r)
1823			goto out;
1824		r = 0;
1825		break;
1826	}
1827	case KVM_GET_MP_STATE: {
1828		struct kvm_mp_state mp_state;
1829
1830		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1831		if (r)
1832			goto out;
1833		r = -EFAULT;
1834		if (copy_to_user(argp, &mp_state, sizeof mp_state))
1835			goto out;
1836		r = 0;
1837		break;
1838	}
1839	case KVM_SET_MP_STATE: {
1840		struct kvm_mp_state mp_state;
1841
1842		r = -EFAULT;
1843		if (copy_from_user(&mp_state, argp, sizeof mp_state))
1844			goto out;
1845		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1846		if (r)
1847			goto out;
1848		r = 0;
1849		break;
1850	}
1851	case KVM_TRANSLATE: {
1852		struct kvm_translation tr;
1853
1854		r = -EFAULT;
1855		if (copy_from_user(&tr, argp, sizeof tr))
1856			goto out;
1857		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
1858		if (r)
1859			goto out;
1860		r = -EFAULT;
1861		if (copy_to_user(argp, &tr, sizeof tr))
1862			goto out;
1863		r = 0;
1864		break;
1865	}
1866	case KVM_SET_GUEST_DEBUG: {
1867		struct kvm_guest_debug dbg;
1868
1869		r = -EFAULT;
1870		if (copy_from_user(&dbg, argp, sizeof dbg))
1871			goto out;
1872		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
1873		if (r)
1874			goto out;
1875		r = 0;
1876		break;
1877	}
1878	case KVM_SET_SIGNAL_MASK: {
1879		struct kvm_signal_mask __user *sigmask_arg = argp;
1880		struct kvm_signal_mask kvm_sigmask;
1881		sigset_t sigset, *p;
1882
1883		p = NULL;
1884		if (argp) {
1885			r = -EFAULT;
1886			if (copy_from_user(&kvm_sigmask, argp,
1887					   sizeof kvm_sigmask))
1888				goto out;
1889			r = -EINVAL;
1890			if (kvm_sigmask.len != sizeof sigset)
1891				goto out;
1892			r = -EFAULT;
1893			if (copy_from_user(&sigset, sigmask_arg->sigset,
1894					   sizeof sigset))
1895				goto out;
1896			p = &sigset;
1897		}
1898		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1899		break;
1900	}
1901	case KVM_GET_FPU: {
1902		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1903		r = -ENOMEM;
1904		if (!fpu)
1905			goto out;
1906		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1907		if (r)
1908			goto out;
1909		r = -EFAULT;
1910		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1911			goto out;
1912		r = 0;
1913		break;
1914	}
1915	case KVM_SET_FPU: {
1916		fpu = memdup_user(argp, sizeof(*fpu));
1917		if (IS_ERR(fpu)) {
1918			r = PTR_ERR(fpu);
 
1919			goto out;
1920		}
1921		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1922		if (r)
1923			goto out;
1924		r = 0;
 
1925		break;
1926	}
1927	default:
1928		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1929	}
1930out:
1931	vcpu_put(vcpu);
1932	kfree(fpu);
1933	kfree(kvm_sregs);
1934	return r;
1935}
1936
1937#ifdef CONFIG_COMPAT
1938static long kvm_vcpu_compat_ioctl(struct file *filp,
1939				  unsigned int ioctl, unsigned long arg)
1940{
1941	struct kvm_vcpu *vcpu = filp->private_data;
1942	void __user *argp = compat_ptr(arg);
1943	int r;
1944
1945	if (vcpu->kvm->mm != current->mm)
1946		return -EIO;
1947
1948	switch (ioctl) {
1949	case KVM_SET_SIGNAL_MASK: {
1950		struct kvm_signal_mask __user *sigmask_arg = argp;
1951		struct kvm_signal_mask kvm_sigmask;
1952		compat_sigset_t csigset;
1953		sigset_t sigset;
1954
1955		if (argp) {
1956			r = -EFAULT;
1957			if (copy_from_user(&kvm_sigmask, argp,
1958					   sizeof kvm_sigmask))
1959				goto out;
1960			r = -EINVAL;
1961			if (kvm_sigmask.len != sizeof csigset)
1962				goto out;
1963			r = -EFAULT;
1964			if (copy_from_user(&csigset, sigmask_arg->sigset,
1965					   sizeof csigset))
1966				goto out;
1967		}
1968		sigset_from_compat(&sigset, &csigset);
1969		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
1970		break;
1971	}
1972	default:
1973		r = kvm_vcpu_ioctl(filp, ioctl, arg);
1974	}
1975
1976out:
1977	return r;
1978}
1979#endif
1980
1981static long kvm_vm_ioctl(struct file *filp,
1982			   unsigned int ioctl, unsigned long arg)
1983{
1984	struct kvm *kvm = filp->private_data;
1985	void __user *argp = (void __user *)arg;
1986	int r;
1987
1988	if (kvm->mm != current->mm)
1989		return -EIO;
1990	switch (ioctl) {
1991	case KVM_CREATE_VCPU:
1992		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1993		if (r < 0)
1994			goto out;
 
1995		break;
 
 
1996	case KVM_SET_USER_MEMORY_REGION: {
1997		struct kvm_userspace_memory_region kvm_userspace_mem;
1998
1999		r = -EFAULT;
2000		if (copy_from_user(&kvm_userspace_mem, argp,
2001						sizeof kvm_userspace_mem))
2002			goto out;
2003
2004		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
2005		if (r)
 
2006			goto out;
 
 
2007		break;
2008	}
2009	case KVM_GET_DIRTY_LOG: {
2010		struct kvm_dirty_log log;
2011
2012		r = -EFAULT;
2013		if (copy_from_user(&log, argp, sizeof log))
2014			goto out;
2015		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2016		if (r)
2017			goto out;
 
2018		break;
2019	}
2020#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 
2021	case KVM_REGISTER_COALESCED_MMIO: {
2022		struct kvm_coalesced_mmio_zone zone;
 
2023		r = -EFAULT;
2024		if (copy_from_user(&zone, argp, sizeof zone))
2025			goto out;
2026		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
2027		if (r)
2028			goto out;
2029		r = 0;
2030		break;
2031	}
2032	case KVM_UNREGISTER_COALESCED_MMIO: {
2033		struct kvm_coalesced_mmio_zone zone;
 
2034		r = -EFAULT;
2035		if (copy_from_user(&zone, argp, sizeof zone))
2036			goto out;
2037		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
2038		if (r)
2039			goto out;
2040		r = 0;
2041		break;
2042	}
2043#endif
2044	case KVM_IRQFD: {
2045		struct kvm_irqfd data;
2046
2047		r = -EFAULT;
2048		if (copy_from_user(&data, argp, sizeof data))
2049			goto out;
2050		r = kvm_irqfd(kvm, &data);
2051		break;
2052	}
2053	case KVM_IOEVENTFD: {
2054		struct kvm_ioeventfd data;
2055
2056		r = -EFAULT;
2057		if (copy_from_user(&data, argp, sizeof data))
2058			goto out;
2059		r = kvm_ioeventfd(kvm, &data);
2060		break;
2061	}
2062#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2063	case KVM_SET_BOOT_CPU_ID:
2064		r = 0;
2065		mutex_lock(&kvm->lock);
2066		if (atomic_read(&kvm->online_vcpus) != 0)
2067			r = -EBUSY;
2068		else
2069			kvm->bsp_vcpu_id = arg;
2070		mutex_unlock(&kvm->lock);
2071		break;
2072#endif
2073#ifdef CONFIG_HAVE_KVM_MSI
2074	case KVM_SIGNAL_MSI: {
2075		struct kvm_msi msi;
2076
2077		r = -EFAULT;
2078		if (copy_from_user(&msi, argp, sizeof msi))
2079			goto out;
2080		r = kvm_send_userspace_msi(kvm, &msi);
2081		break;
2082	}
2083#endif
2084	default:
2085		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2086		if (r == -ENOTTY)
2087			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
2088	}
2089out:
2090	return r;
2091}
2092
2093#ifdef CONFIG_COMPAT
2094struct compat_kvm_dirty_log {
2095	__u32 slot;
2096	__u32 padding1;
2097	union {
2098		compat_uptr_t dirty_bitmap; /* one bit per page */
2099		__u64 padding2;
2100	};
2101};
2102
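/*
 * 32-bit compat handling: a 32-bit kvm_dirty_log carries a 32-bit user
 * pointer for the dirty bitmap, so convert it with compat_ptr() into the
 * native layout before reusing the regular KVM_GET_DIRTY_LOG path; every
 * other ioctl is forwarded unchanged to kvm_vm_ioctl().
 */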
2103static long kvm_vm_compat_ioctl(struct file *filp,
2104			   unsigned int ioctl, unsigned long arg)
2105{
2106	struct kvm *kvm = filp->private_data;
2107	int r;
2108
2109	if (kvm->mm != current->mm)
2110		return -EIO;
2111	switch (ioctl) {
2112	case KVM_GET_DIRTY_LOG: {
2113		struct compat_kvm_dirty_log compat_log;
2114		struct kvm_dirty_log log;
2115
2116		r = -EFAULT;
2117		if (copy_from_user(&compat_log, (void __user *)arg,
2118				   sizeof(compat_log)))
2119			goto out;
2120		log.slot	 = compat_log.slot;
2121		log.padding1	 = compat_log.padding1;
2122		log.padding2	 = compat_log.padding2;
2123		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
2124
2125		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2126		if (r)
2127			goto out;
2128		break;
2129	}
2130	default:
2131		r = kvm_vm_ioctl(filp, ioctl, arg);
2132	}
2133
2134out:
2135	return r;
2136}
2137#endif
2138
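/*
 * Fault handler for mmap() of a VM fd: the file offset is interpreted as a
 * guest frame number, translated to a host virtual address, and the backing
 * page is pinned with get_user_pages() and returned to the faulting mapping.
 */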
2139static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2140{
2141	struct page *page[1];
2142	unsigned long addr;
2143	int npages;
2144	gfn_t gfn = vmf->pgoff;
2145	struct kvm *kvm = vma->vm_file->private_data;
2146
2147	addr = gfn_to_hva(kvm, gfn);
2148	if (kvm_is_error_hva(addr))
2149		return VM_FAULT_SIGBUS;
2150
2151	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
2152				NULL);
2153	if (unlikely(npages != 1))
2154		return VM_FAULT_SIGBUS;
2155
2156	vmf->page = page[0];
2157	return 0;
2158}
2159
2160static const struct vm_operations_struct kvm_vm_vm_ops = {
2161	.fault = kvm_vm_fault,
2162};
2163
2164static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2165{
2166	vma->vm_ops = &kvm_vm_vm_ops;
2167	return 0;
2168}
2169
2170static struct file_operations kvm_vm_fops = {
2171	.release        = kvm_vm_release,
2172	.unlocked_ioctl = kvm_vm_ioctl,
2173#ifdef CONFIG_COMPAT
2174	.compat_ioctl   = kvm_vm_compat_ioctl,
2175#endif
2176	.mmap           = kvm_vm_mmap,
2177	.llseek		= noop_llseek,
 
2178};
2179
2180static int kvm_dev_ioctl_create_vm(unsigned long type)
2181{
2182	int r;
 
2183	struct kvm *kvm;
 
2184
2185	kvm = kvm_create_vm(type);
2186	if (IS_ERR(kvm))
2187		return PTR_ERR(kvm);
2188#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2189	r = kvm_coalesced_mmio_init(kvm);
2190	if (r < 0) {
2191		kvm_put_kvm(kvm);
2192		return r;
2193	}
2194#endif
2195	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
2196	if (r < 0)
2197		kvm_put_kvm(kvm);
2198
2199	return r;
2200}
2201
2202static long kvm_dev_ioctl_check_extension_generic(long arg)
2203{
2204	switch (arg) {
2205	case KVM_CAP_USER_MEMORY:
2206	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2207	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2208#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2209	case KVM_CAP_SET_BOOT_CPU_ID:
2210#endif
2211	case KVM_CAP_INTERNAL_ERROR_DATA:
2212#ifdef CONFIG_HAVE_KVM_MSI
2213	case KVM_CAP_SIGNAL_MSI:
2214#endif
2215		return 1;
2216#ifdef CONFIG_HAVE_KVM_IRQCHIP
2217	case KVM_CAP_IRQ_ROUTING:
2218		return KVM_MAX_IRQ_ROUTES;
2219#endif
2220	default:
2221		break;
2222	}
2223	return kvm_dev_ioctl_check_extension(arg);
2224}
2225
2226static long kvm_dev_ioctl(struct file *filp,
2227			  unsigned int ioctl, unsigned long arg)
2228{
2229	long r = -EINVAL;
2230
2231	switch (ioctl) {
2232	case KVM_GET_API_VERSION:
2233		r = -EINVAL;
2234		if (arg)
2235			goto out;
2236		r = KVM_API_VERSION;
2237		break;
2238	case KVM_CREATE_VM:
2239		r = kvm_dev_ioctl_create_vm(arg);
2240		break;
2241	case KVM_CHECK_EXTENSION:
2242		r = kvm_dev_ioctl_check_extension_generic(arg);
2243		break;
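	/*
	 * KVM_GET_VCPU_MMAP_SIZE reports how many bytes of a vcpu fd userspace
	 * must mmap() to reach the shared kvm_run structure plus the optional
	 * pio and coalesced-MMIO pages accounted for below.  A rough userspace
	 * sketch (fd names are hypothetical):
	 *
	 *   int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	 *   struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	 *                              MAP_SHARED, vcpu_fd, 0);
	 */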
2244	case KVM_GET_VCPU_MMAP_SIZE:
2245		r = -EINVAL;
2246		if (arg)
2247			goto out;
2248		r = PAGE_SIZE;     /* struct kvm_run */
2249#ifdef CONFIG_X86
2250		r += PAGE_SIZE;    /* pio data page */
2251#endif
2252#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2253		r += PAGE_SIZE;    /* coalesced mmio ring page */
2254#endif
2255		break;
2256	case KVM_TRACE_ENABLE:
2257	case KVM_TRACE_PAUSE:
2258	case KVM_TRACE_DISABLE:
2259		r = -EOPNOTSUPP;
2260		break;
2261	default:
2262		return kvm_arch_dev_ioctl(filp, ioctl, arg);
2263	}
2264out:
2265	return r;
2266}
2267
2268static struct file_operations kvm_chardev_ops = {
2269	.unlocked_ioctl = kvm_dev_ioctl,
2270	.compat_ioctl   = kvm_dev_ioctl,
2271	.llseek		= noop_llseek,
 
2272};
2273
2274static struct miscdevice kvm_dev = {
2275	KVM_MINOR,
2276	"kvm",
2277	&kvm_chardev_ops,
2278};
2279
2280static void hardware_enable_nolock(void *junk)
2281{
2282	int cpu = raw_smp_processor_id();
2283	int r;
2284
2285	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2286		return;
2287
2288	cpumask_set_cpu(cpu, cpus_hardware_enabled);
 
2289
2290	r = kvm_arch_hardware_enable(NULL);
2291
2292	if (r) {
2293		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2294		atomic_inc(&hardware_enable_failed);
2295		printk(KERN_INFO "kvm: enabling virtualization on "
2296				 "CPU%d failed\n", cpu);
2297	}
2298}
2299
2300static void hardware_enable(void *junk)
2301{
2302	raw_spin_lock(&kvm_lock);
2303	hardware_enable_nolock(junk);
2304	raw_spin_unlock(&kvm_lock);
2305}
2306
2307static void hardware_disable_nolock(void *junk)
2308{
2309	int cpu = raw_smp_processor_id();
2310
2311	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2312		return;
2313	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2314	kvm_arch_hardware_disable(NULL);
 
 
2315}
2316
2317static void hardware_disable(void *junk)
2318{
2319	raw_spin_lock(&kvm_lock);
2320	hardware_disable_nolock(junk);
2321	raw_spin_unlock(&kvm_lock);
 
 
2322}
2323
2324static void hardware_disable_all_nolock(void)
2325{
2326	BUG_ON(!kvm_usage_count);
2327
2328	kvm_usage_count--;
2329	if (!kvm_usage_count)
2330		on_each_cpu(hardware_disable_nolock, NULL, 1);
2331}
2332
2333static void hardware_disable_all(void)
2334{
2335	raw_spin_lock(&kvm_lock);
 
2336	hardware_disable_all_nolock();
2337	raw_spin_unlock(&kvm_lock);
 
2338}
2339
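/*
 * Hardware virtualization is enabled lazily and reference-counted: the first
 * VM to be created turns it on across all online CPUs, and the last VM to go
 * away turns it back off.  If any CPU fails to enable, the attempt is rolled
 * back and -EBUSY is returned.
 */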
2340static int hardware_enable_all(void)
2341{
2342	int r = 0;
2343
2344	raw_spin_lock(&kvm_lock);
2345
2346	kvm_usage_count++;
2347	if (kvm_usage_count == 1) {
2348		atomic_set(&hardware_enable_failed, 0);
2349		on_each_cpu(hardware_enable_nolock, NULL, 1);
2350
2351		if (atomic_read(&hardware_enable_failed)) {
2352			hardware_disable_all_nolock();
2353			r = -EBUSY;
2354		}
2355	}
2356
2357	raw_spin_unlock(&kvm_lock);
 
2358
2359	return r;
2360}
2361
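/*
 * CPU hotplug callback: while at least one VM exists, enable virtualization
 * on a CPU that is starting and disable it on one that is dying, so that
 * hotplugged CPUs match the global enable state.
 */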
2362static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2363			   void *v)
2364{
2365	int cpu = (long)v;
2366
2367	if (!kvm_usage_count)
2368		return NOTIFY_OK;
2369
2370	val &= ~CPU_TASKS_FROZEN;
2371	switch (val) {
2372	case CPU_DYING:
2373		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2374		       cpu);
2375		hardware_disable(NULL);
2376		break;
2377	case CPU_STARTING:
2378		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2379		       cpu);
2380		hardware_enable(NULL);
2381		break;
2382	}
2383	return NOTIFY_OK;
2384}
2385
2386
2387asmlinkage void kvm_spurious_fault(void)
2388{
2389	/* Fault while not rebooting.  We want the trace. */
2390	BUG();
2391}
2392EXPORT_SYMBOL_GPL(kvm_spurious_fault);
2393
2394static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2395		      void *v)
2396{
2397	/*
2398	 * Some BIOSes (well, at least the author's) hang on reboot if the
2399	 * CPU is left in VMX root mode.
2400	 *
2401	 * Intel TXT also requires VMX to be off on all CPUs at system shutdown.
2402	 */
2403	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2404	kvm_rebooting = true;
2405	on_each_cpu(hardware_disable_nolock, NULL, 1);
2406	return NOTIFY_OK;
2407}
 
2408
2409static struct notifier_block kvm_reboot_notifier = {
2410	.notifier_call = kvm_reboot,
2411	.priority = 0,
2412};
 
2413
2414static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2415{
2416	int i;
2417
2418	for (i = 0; i < bus->dev_count; i++) {
2419		struct kvm_io_device *pos = bus->range[i].dev;
2420
2421		kvm_iodevice_destructor(pos);
2422	}
2423	kfree(bus);
2424}
2425
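/*
 * Ordering comparator for the I/O bus range array: ranges sort by start
 * address, and a range that starts at or above another and ends at or below
 * it compares equal.  Treating containment as equality is what lets the
 * bsearch() below find a registered range that covers a probed
 * [addr, addr + len) access.
 */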
2426int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 
2427{
2428	const struct kvm_io_range *r1 = p1;
2429	const struct kvm_io_range *r2 = p2;
2430
2431	if (r1->addr < r2->addr)
2432		return -1;
2433	if (r1->addr + r1->len > r2->addr + r2->len)
2434		return 1;
 
2435	return 0;
2436}
2437
2438int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
2439			  gpa_t addr, int len)
2440{
2441	bus->range[bus->dev_count++] = (struct kvm_io_range) {
2442		.addr = addr,
2443		.len = len,
2444		.dev = dev,
2445	};
2446
2447	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
2448		kvm_io_bus_sort_cmp, NULL);
2449
2450	return 0;
2451}
2452
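/*
 * Find the index of the first registered range matching [addr, addr + len).
 * bsearch() may land on any matching entry, so walk back to the first one;
 * callers then iterate forward over all devices that compare equal.
 * Returns -ENOENT if no registered range covers the access.
 */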
2453int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
2454			     gpa_t addr, int len)
2455{
2456	struct kvm_io_range *range, key;
2457	int off;
2458
2459	key = (struct kvm_io_range) {
2460		.addr = addr,
2461		.len = len,
2462	};
2463
2464	range = bsearch(&key, bus->range, bus->dev_count,
2465			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
2466	if (range == NULL)
2467		return -ENOENT;
2468
2469	off = range - bus->range;
2470
2471	while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
2472		off--;
2473
2474	return off;
2475}
2476
2477/* kvm_io_bus_write - called under kvm->slots_lock */
2478int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2479		     int len, const void *val)
2480{
2481	int idx;
2482	struct kvm_io_bus *bus;
2483	struct kvm_io_range range;
 
2484
2485	range = (struct kvm_io_range) {
2486		.addr = addr,
2487		.len = len,
2488	};
2489
2490	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2491	idx = kvm_io_bus_get_first_dev(bus, addr, len);
2492	if (idx < 0)
2493		return -EOPNOTSUPP;
2494
2495	while (idx < bus->dev_count &&
2496		kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
2497		if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
2498			return 0;
 
2499		idx++;
2500	}
2501
2502	return -EOPNOTSUPP;
2503}
2504
2505/* kvm_io_bus_read - called under kvm->slots_lock */
2506int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2507		    int len, void *val)
2508{
2509	int idx;
2510	struct kvm_io_bus *bus;
2511	struct kvm_io_range range;
 
2512
2513	range = (struct kvm_io_range) {
2514		.addr = addr,
2515		.len = len,
2516	};
2517
2518	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2519	idx = kvm_io_bus_get_first_dev(bus, addr, len);
2520	if (idx < 0)
2521		return -EOPNOTSUPP;
2522
2523	while (idx < bus->dev_count &&
2524		kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
2525		if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
2526			return 0;
2527		idx++;
2528	}
2529
2530	return -EOPNOTSUPP;
2531}
2532
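/*
 * Registration uses a copy-and-swap scheme: allocate a new bus with room for
 * one more range, copy the old contents, insert the new device in sorted
 * order, publish the new bus with rcu_assign_pointer(), and free the old one
 * only after synchronize_srcu_expedited() guarantees no reader still sees it.
 */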
2533/* Caller must hold slots_lock. */
2534int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2535			    int len, struct kvm_io_device *dev)
2536{
 
2537	struct kvm_io_bus *new_bus, *bus;
2538
2539	bus = kvm->buses[bus_idx];
2540	if (bus->dev_count > NR_IOBUS_DEVS - 1)
2541		return -ENOSPC;
2542
2543	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
2544			  sizeof(struct kvm_io_range)), GFP_KERNEL);
2545	if (!new_bus)
2546		return -ENOMEM;
2547	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
2548	       sizeof(struct kvm_io_range)));
2549	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
2550	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2551	synchronize_srcu_expedited(&kvm->srcu);
2552	kfree(bus);
2553
2554	return 0;
2555}
2556
2557/* Caller must hold slots_lock. */
2558int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2559			      struct kvm_io_device *dev)
2560{
2561	int i, r;
2562	struct kvm_io_bus *new_bus, *bus;
2563
2564	bus = kvm->buses[bus_idx];
2565	r = -ENOENT;
2566	for (i = 0; i < bus->dev_count; i++)
2567		if (bus->range[i].dev == dev) {
2568			r = 0;
2569			break;
2570		}
 
2571
2572	if (r)
2573		return r;
2574
2575	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
2576			  sizeof(struct kvm_io_range)), GFP_KERNEL);
2577	if (!new_bus)
2578		return -ENOMEM;
2579
2580	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
2581	new_bus->dev_count--;
2582	memcpy(new_bus->range + i, bus->range + i + 1,
2583	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
2584
2585	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2586	synchronize_srcu_expedited(&kvm->srcu);
2587	kfree(bus);
2588	return r;
2589}
2590
2591static struct notifier_block kvm_cpu_notifier = {
2592	.notifier_call = kvm_cpu_hotplug,
2593};
2594
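/*
 * debugfs statistics: each debugfs_entries item records the offset of a u32
 * counter within struct kvm or struct kvm_vcpu.  The getters below walk
 * vm_list under kvm_lock and sum that counter across every VM (and, for the
 * vcpu variant, across every vcpu), so the files report system-wide totals.
 */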
2595static int vm_stat_get(void *_offset, u64 *val)
2596{
2597	unsigned offset = (long)_offset;
2598	struct kvm *kvm;
2599
2600	*val = 0;
2601	raw_spin_lock(&kvm_lock);
2602	list_for_each_entry(kvm, &vm_list, vm_list)
2603		*val += *(u32 *)((void *)kvm + offset);
2604	raw_spin_unlock(&kvm_lock);
2605	return 0;
2606}
2607
2608DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
 
 
2609
2610static int vcpu_stat_get(void *_offset, u64 *val)
 
 
 
2611{
2612	unsigned offset = (long)_offset;
2613	struct kvm *kvm;
2614	struct kvm_vcpu *vcpu;
2615	int i;
2616
2617	*val = 0;
2618	raw_spin_lock(&kvm_lock);
2619	list_for_each_entry(kvm, &vm_list, vm_list)
2620		kvm_for_each_vcpu(i, vcpu, kvm)
2621			*val += *(u32 *)((void *)vcpu + offset);
2622
2623	raw_spin_unlock(&kvm_lock);
 
 
2624	return 0;
2625}
2626
2627DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2628
2629static const struct file_operations *stat_fops[] = {
2630	[KVM_STAT_VCPU] = &vcpu_stat_fops,
2631	[KVM_STAT_VM]   = &vm_stat_fops,
2632};
 
2633
2634static int kvm_init_debug(void)
2635{
2636	int r = -EFAULT;
2637	struct kvm_stats_debugfs_item *p;
2638
2639	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2640	if (kvm_debugfs_dir == NULL)
2641		goto out;
2642
2643	for (p = debugfs_entries; p->name; ++p) {
2644		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2645						(void *)(long)p->offset,
2646						stat_fops[p->kind]);
2647		if (p->dentry == NULL)
2648			goto out_dir;
 
 
 
2649	}
2650
2651	return 0;
 
2652
2653out_dir:
2654	debugfs_remove_recursive(kvm_debugfs_dir);
2655out:
2656	return r;
2657}
2658
2659static void kvm_exit_debug(void)
2660{
2661	struct kvm_stats_debugfs_item *p;
 
 
2662
2663	for (p = debugfs_entries; p->name; ++p)
2664		debugfs_remove(p->dentry);
2665	debugfs_remove(kvm_debugfs_dir);
2666}
2667
2668static int kvm_suspend(void)
2669{
2670	if (kvm_usage_count)
2671		hardware_disable_nolock(NULL);
2672	return 0;
2673}
2674
2675static void kvm_resume(void)
2676{
2677	if (kvm_usage_count) {
2678		WARN_ON(raw_spin_is_locked(&kvm_lock));
2679		hardware_enable_nolock(NULL);
2680	}
2681}
2682
2683static struct syscore_ops kvm_syscore_ops = {
2684	.suspend = kvm_suspend,
2685	.resume = kvm_resume,
2686};
2687
2688struct page *bad_page;
2689pfn_t bad_pfn;
2690
2691static inline
2692struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2693{
2694	return container_of(pn, struct kvm_vcpu, preempt_notifier);
2695}
2696
2697static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2698{
2699	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2700
2701	kvm_arch_vcpu_load(vcpu, cpu);
2702}
2703
2704static void kvm_sched_out(struct preempt_notifier *pn,
2705			  struct task_struct *next)
2706{
2707	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2708
2709	kvm_arch_vcpu_put(vcpu);
 
2710}
2711
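/*
 * Module initialization entry point, called from each architecture's module
 * init.  Roughly: arch init, allocate the bad/hwpoison/fault pages, set up
 * hardware and check every online CPU for compatibility, register the CPU
 * hotplug and reboot notifiers, create the vcpu slab cache, initialize async
 * page faults, register /dev/kvm, and create the debugfs entries.
 */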
2712int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2713		  struct module *module)
2714{
2715	int r;
2716	int cpu;
2717
2718	r = kvm_arch_init(opaque);
2719	if (r)
2720		goto out_fail;
2721
2722	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 
 
2723
2724	if (bad_page == NULL) {
2725		r = -ENOMEM;
2726		goto out;
2727	}
2728
2729	bad_pfn = page_to_pfn(bad_page);
2730
2731	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 
2732
2733	if (hwpoison_page == NULL) {
2734		r = -ENOMEM;
2735		goto out_free_0;
2736	}
2737
2738	hwpoison_pfn = page_to_pfn(hwpoison_page);
 
2739
2740	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 
 
2741
2742	if (fault_page == NULL) {
2743		r = -ENOMEM;
2744		goto out_free_0;
2745	}
2746
2747	fault_pfn = page_to_pfn(fault_page);
 
2748
2749	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2750		r = -ENOMEM;
2751		goto out_free_0;
2752	}
 
2753
2754	r = kvm_arch_hardware_setup();
2755	if (r < 0)
2756		goto out_free_0a;
2757
2758	for_each_online_cpu(cpu) {
2759		smp_call_function_single(cpu,
2760				kvm_arch_check_processor_compat,
2761				&r, 1);
2762		if (r < 0)
2763			goto out_free_1;
2764	}
2765
2766	r = register_cpu_notifier(&kvm_cpu_notifier);
 
 
2767	if (r)
2768		goto out_free_2;
2769	register_reboot_notifier(&kvm_reboot_notifier);
 
 
2770
2771	/* A kmem cache lets us meet the alignment requirements of fx_save. */
2772	if (!vcpu_align)
2773		vcpu_align = __alignof__(struct kvm_vcpu);
2774	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
2775					   0, NULL);
2776	if (!kvm_vcpu_cache) {
2777		r = -ENOMEM;
2778		goto out_free_3;
2779	}
2780
2781	r = kvm_async_pf_init();
2782	if (r)
2783		goto out_free;
2784
2785	kvm_chardev_ops.owner = module;
2786	kvm_vm_fops.owner = module;
2787	kvm_vcpu_fops.owner = module;
2788
2789	r = misc_register(&kvm_dev);
2790	if (r) {
2791		printk(KERN_ERR "kvm: misc device register failed\n");
2792		goto out_unreg;
2793	}
2794
2795	register_syscore_ops(&kvm_syscore_ops);
2796
2797	kvm_preempt_ops.sched_in = kvm_sched_in;
2798	kvm_preempt_ops.sched_out = kvm_sched_out;
2799
2800	r = kvm_init_debug();
2801	if (r) {
2802		printk(KERN_ERR "kvm: create debugfs files failed\n");
2803		goto out_undebugfs;
2804	}
2805
2806	return 0;
2807
2808out_undebugfs:
2809	unregister_syscore_ops(&kvm_syscore_ops);
2810out_unreg:
2811	kvm_async_pf_deinit();
2812out_free:
2813	kmem_cache_destroy(kvm_vcpu_cache);
2814out_free_3:
2815	unregister_reboot_notifier(&kvm_reboot_notifier);
2816	unregister_cpu_notifier(&kvm_cpu_notifier);
2817out_free_2:
2818out_free_1:
2819	kvm_arch_hardware_unsetup();
2820out_free_0a:
2821	free_cpumask_var(cpus_hardware_enabled);
2822out_free_0:
2823	if (fault_page)
2824		__free_page(fault_page);
2825	if (hwpoison_page)
2826		__free_page(hwpoison_page);
2827	__free_page(bad_page);
2828out:
2829	kvm_arch_exit();
2830out_fail:
2831	return r;
2832}
2833EXPORT_SYMBOL_GPL(kvm_init);
2834
2835void kvm_exit(void)
2836{
2837	kvm_exit_debug();
2838	misc_deregister(&kvm_dev);
 
 
 
 
2839	kmem_cache_destroy(kvm_vcpu_cache);
 
2840	kvm_async_pf_deinit();
 
2841	unregister_syscore_ops(&kvm_syscore_ops);
2842	unregister_reboot_notifier(&kvm_reboot_notifier);
2843	unregister_cpu_notifier(&kvm_cpu_notifier);
2844	on_each_cpu(hardware_disable_nolock, NULL, 1);
2845	kvm_arch_hardware_unsetup();
2846	kvm_arch_exit();
2847	free_cpumask_var(cpus_hardware_enabled);
2848	__free_page(fault_page);
2849	__free_page(hwpoison_page);
2850	__free_page(bad_page);
2851}
2852EXPORT_SYMBOL_GPL(kvm_exit);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Kernel-based Virtual Machine driver for Linux
   4 *
   5 * This module enables machines with Intel VT-x extensions to run virtual
   6 * machines without emulation or binary translation.
   7 *
   8 * Copyright (C) 2006 Qumranet, Inc.
   9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  10 *
  11 * Authors:
  12 *   Avi Kivity   <avi@qumranet.com>
  13 *   Yaniv Kamay  <yaniv@qumranet.com>
  14 */
  15
  16#include <kvm/iodev.h>
  17
  18#include <linux/kvm_host.h>
  19#include <linux/kvm.h>
  20#include <linux/module.h>
  21#include <linux/errno.h>
  22#include <linux/percpu.h>
  23#include <linux/mm.h>
  24#include <linux/miscdevice.h>
  25#include <linux/vmalloc.h>
  26#include <linux/reboot.h>
  27#include <linux/debugfs.h>
  28#include <linux/highmem.h>
  29#include <linux/file.h>
  30#include <linux/syscore_ops.h>
  31#include <linux/cpu.h>
  32#include <linux/sched/signal.h>
  33#include <linux/sched/mm.h>
  34#include <linux/sched/stat.h>
  35#include <linux/cpumask.h>
  36#include <linux/smp.h>
  37#include <linux/anon_inodes.h>
  38#include <linux/profile.h>
  39#include <linux/kvm_para.h>
  40#include <linux/pagemap.h>
  41#include <linux/mman.h>
  42#include <linux/swap.h>
  43#include <linux/bitops.h>
  44#include <linux/spinlock.h>
  45#include <linux/compat.h>
  46#include <linux/srcu.h>
  47#include <linux/hugetlb.h>
  48#include <linux/slab.h>
  49#include <linux/sort.h>
  50#include <linux/bsearch.h>
  51#include <linux/io.h>
  52#include <linux/lockdep.h>
  53#include <linux/kthread.h>
  54#include <linux/suspend.h>
  55
  56#include <asm/processor.h>
  57#include <asm/ioctl.h>
  58#include <linux/uaccess.h>
 
  59
  60#include "coalesced_mmio.h"
  61#include "async_pf.h"
  62#include "kvm_mm.h"
  63#include "vfio.h"
  64
  65#include <trace/events/ipi.h>
  66
  67#define CREATE_TRACE_POINTS
  68#include <trace/events/kvm.h>
  69
  70#include <linux/kvm_dirty_ring.h>
  71
  72
  73/* Worst case buffer size needed for holding an integer. */
  74#define ITOA_MAX_LEN 12
  75
  76MODULE_AUTHOR("Qumranet");
  77MODULE_LICENSE("GPL");
  78
  79/* Architectures should define their poll value according to the halt latency */
  80unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
  81module_param(halt_poll_ns, uint, 0644);
  82EXPORT_SYMBOL_GPL(halt_poll_ns);
  83
  84/* Default doubles per-vcpu halt_poll_ns. */
  85unsigned int halt_poll_ns_grow = 2;
  86module_param(halt_poll_ns_grow, uint, 0644);
  87EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
  88
  89/* The start value to grow halt_poll_ns from */
  90unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
  91module_param(halt_poll_ns_grow_start, uint, 0644);
  92EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
  93
  94/* Default resets per-vcpu halt_poll_ns. */
  95unsigned int halt_poll_ns_shrink;
  96module_param(halt_poll_ns_shrink, uint, 0644);
  97EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  98
  99/*
 100 * Ordering of locks:
 101 *
 102 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 103 */
 104
 105DEFINE_MUTEX(kvm_lock);
 106LIST_HEAD(vm_list);
 107
 108static struct kmem_cache *kvm_vcpu_cache;
 109
 110static __read_mostly struct preempt_ops kvm_preempt_ops;
 111static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
 112
 113struct dentry *kvm_debugfs_dir;
 114EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 115
 116static const struct file_operations stat_fops_per_vm;
 117
 118static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 119			   unsigned long arg);
 120#ifdef CONFIG_KVM_COMPAT
 121static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
 122				  unsigned long arg);
 123#define KVM_COMPAT(c)	.compat_ioctl	= (c)
 124#else
 125/*
 126 * For architectures that don't implement a compat infrastructure,
 127 * adopt a double line of defense:
 128 * - Prevent a compat task from opening /dev/kvm
 129 * - If the open has been done by a 64bit task, and the KVM fd
 130 *   passed to a compat task, let the ioctls fail.
 131 */
 132static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
 133				unsigned long arg) { return -EINVAL; }
 134
 135static int kvm_no_compat_open(struct inode *inode, struct file *file)
 136{
 137	return is_compat_task() ? -ENODEV : 0;
 138}
 139#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
 140			.open		= kvm_no_compat_open
 141#endif
 142static int hardware_enable_all(void);
 143static void hardware_disable_all(void);
 144
 145static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 146
 147#define KVM_EVENT_CREATE_VM 0
 148#define KVM_EVENT_DESTROY_VM 1
 149static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 150static unsigned long long kvm_createvm_count;
 151static unsigned long long kvm_active_vms;
 152
 153static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
 154
 155__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 156{
 157}
 158
 159bool kvm_is_zone_device_page(struct page *page)
 160{
 161	/*
 162	 * The metadata used by is_zone_device_page() to determine whether or
 163	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
 164	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
 165	 * page_count() is zero to help detect bad usage of this helper.
 166	 */
 167	if (WARN_ON_ONCE(!page_count(page)))
 168		return false;
 169
 170	return is_zone_device_page(page);
 171}
 172
 173/*
 174 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 175 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 176 * is likely incomplete; it has been compiled purely from reports of people
 177 * wanting to back guests with a certain type of memory and hitting issues.
 178 */
 179struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
 180{
 181	struct page *page;
 182
 183	if (!pfn_valid(pfn))
 184		return NULL;
 185
 186	page = pfn_to_page(pfn);
 187	if (!PageReserved(page))
 188		return page;
 189
 190	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
 191	if (is_zero_pfn(pfn))
 192		return page;
 193
 194	/*
 195	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
 196	 * perspective they are "normal" pages, albeit with slightly different
 197	 * usage rules.
 198	 */
 199	if (kvm_is_zone_device_page(page))
 200		return page;
 201
 202	return NULL;
 203}
 204
 205/*
 206 * Switches to specified vcpu, until a matching vcpu_put()
 207 */
 208void vcpu_load(struct kvm_vcpu *vcpu)
 209{
 210	int cpu = get_cpu();
 211
 212	__this_cpu_write(kvm_running_vcpu, vcpu);
 213	preempt_notifier_register(&vcpu->preempt_notifier);
 214	kvm_arch_vcpu_load(vcpu, cpu);
 215	put_cpu();
 216}
 217EXPORT_SYMBOL_GPL(vcpu_load);
 218
 219void vcpu_put(struct kvm_vcpu *vcpu)
 220{
 221	preempt_disable();
 222	kvm_arch_vcpu_put(vcpu);
 223	preempt_notifier_unregister(&vcpu->preempt_notifier);
 224	__this_cpu_write(kvm_running_vcpu, NULL);
 225	preempt_enable();
 226}
 227EXPORT_SYMBOL_GPL(vcpu_put);
 228
 229/* TODO: merge with kvm_arch_vcpu_should_kick */
 230static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
 231{
 232	int mode = kvm_vcpu_exiting_guest_mode(vcpu);
 233
 234	/*
 235	 * We need to wait for the VCPU to reenable interrupts and get out of
 236	 * READING_SHADOW_PAGE_TABLES mode.
 237	 */
 238	if (req & KVM_REQUEST_WAIT)
 239		return mode != OUTSIDE_GUEST_MODE;
 240
 241	/*
 242	 * Need to kick a running VCPU, but otherwise there is nothing to do.
 243	 */
 244	return mode == IN_GUEST_MODE;
 245}
 246
 247static void ack_kick(void *_completed)
 248{
 249}
 250
 251static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
 252{
 253	if (cpumask_empty(cpus))
 254		return false;
 255
 256	smp_call_function_many(cpus, ack_kick, NULL, wait);
 257	return true;
 258}
 259
 260static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
 261				  struct cpumask *tmp, int current_cpu)
 262{
 263	int cpu;
 264
 265	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
 266		__kvm_make_request(req, vcpu);
 267
 268	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
 269		return;
 270
 271	/*
 272	 * Note, the vCPU could get migrated to a different pCPU at any point
 273	 * after kvm_request_needs_ipi(), which could result in sending an IPI
 274	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
 275	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
 276	 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
 277	 * after this point is also OK, as the requirement is only that KVM wait
 278	 * for vCPUs that were reading SPTEs _before_ any changes were
 279	 * finalized. See kvm_vcpu_kick() for more details on handling requests.
 280	 */
 281	if (kvm_request_needs_ipi(vcpu, req)) {
 282		cpu = READ_ONCE(vcpu->cpu);
 283		if (cpu != -1 && cpu != current_cpu)
 284			__cpumask_set_cpu(cpu, tmp);
 285	}
 286}
 287
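/*
 * Post @req to every vCPU whose index is set in @vcpu_bitmap, collecting the
 * physical CPUs that actually need an IPI into the per-CPU kick mask so all
 * wakeups can be issued with one batched smp_call_function_many() call.
 */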
 288bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 289				 unsigned long *vcpu_bitmap)
 290{
 291	struct kvm_vcpu *vcpu;
 292	struct cpumask *cpus;
 293	int i, me;
 294	bool called;
 295
 296	me = get_cpu();
 297
 298	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
 299	cpumask_clear(cpus);
 300
 301	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
 302		vcpu = kvm_get_vcpu(kvm, i);
 303		if (!vcpu)
 304			continue;
 305		kvm_make_vcpu_request(vcpu, req, cpus, me);
 306	}
 307
 308	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
 309	put_cpu();
 310
 311	return called;
 312}
 313
 314bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
 315				      struct kvm_vcpu *except)
 316{
 317	struct kvm_vcpu *vcpu;
 318	struct cpumask *cpus;
 319	unsigned long i;
 320	bool called;
 321	int me;
 322
 323	me = get_cpu();
 324
 325	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
 326	cpumask_clear(cpus);
 327
 328	kvm_for_each_vcpu(i, vcpu, kvm) {
 329		if (vcpu == except)
 330			continue;
 331		kvm_make_vcpu_request(vcpu, req, cpus, me);
 332	}
 333
 334	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
 335	put_cpu();
 336
 337	return called;
 338}
 339
 340bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 341{
 342	return kvm_make_all_cpus_request_except(kvm, req, NULL);
 343}
 344EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 345
 346void kvm_flush_remote_tlbs(struct kvm *kvm)
 347{
 348	++kvm->stat.generic.remote_tlb_flush_requests;
 349
 350	/*
 351	 * We want to publish modifications to the page tables before reading
 352	 * mode. Pairs with a memory barrier in arch-specific code.
 353	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
 354	 * and smp_mb in walk_shadow_page_lockless_begin/end.
 355	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
 356	 *
 357	 * There is already an smp_mb__after_atomic() before
 358	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
 359	 * barrier here.
 360	 */
 361	if (!kvm_arch_flush_remote_tlbs(kvm)
 362	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 363		++kvm->stat.generic.remote_tlb_flush;
 364}
 365EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 366
 367void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
 368{
 369	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
 370		return;
 371
 372	/*
373	 * Fall back to flushing the entire TLB if the architecture's range-based
 374	 * TLB invalidation is unsupported or can't be performed for whatever
 375	 * reason.
 376	 */
 377	kvm_flush_remote_tlbs(kvm);
 378}
 379
 380void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
 381				   const struct kvm_memory_slot *memslot)
 382{
 383	/*
 384	 * All current use cases for flushing the TLBs for a specific memslot
 385	 * are related to dirty logging, and many do the TLB flush out of
386	 * mmu_lock. The interactions between the various operations on a memslot
387	 * must be serialized by slots_lock to ensure the TLB flush from one
 388	 * operation is observed by any other operation on the same memslot.
 389	 */
 390	lockdep_assert_held(&kvm->slots_lock);
 391	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
 392}
 393
 394static void kvm_flush_shadow_all(struct kvm *kvm)
 395{
 396	kvm_arch_flush_shadow_all(kvm);
 397	kvm_arch_guest_memory_reclaimed(kvm);
 398}
 399
 400#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 401static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 402					       gfp_t gfp_flags)
 403{
 404	gfp_flags |= mc->gfp_zero;
 405
 406	if (mc->kmem_cache)
 407		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
 408	else
 409		return (void *)__get_free_page(gfp_flags);
 410}
 411
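/*
 * Top up an MMU memory cache so that at least @min objects are preallocated,
 * growing the backing array to @capacity on first use and filling the cache
 * up to @capacity.  Preallocating here is what allows
 * kvm_mmu_memory_cache_alloc() to hand out objects later in contexts that
 * must not sleep; it only falls back to GFP_ATOMIC, with a WARN, if the
 * cache runs dry.
 */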
 412int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
 413{
 414	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
 415	void *obj;
 416
 417	if (mc->nobjs >= min)
 418		return 0;
 419
 420	if (unlikely(!mc->objects)) {
 421		if (WARN_ON_ONCE(!capacity))
 422			return -EIO;
 423
 424		mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
 425		if (!mc->objects)
 426			return -ENOMEM;
 427
 428		mc->capacity = capacity;
 429	}
 430
 431	/* It is illegal to request a different capacity across topups. */
 432	if (WARN_ON_ONCE(mc->capacity != capacity))
 433		return -EIO;
 434
 435	while (mc->nobjs < mc->capacity) {
 436		obj = mmu_memory_cache_alloc_obj(mc, gfp);
 437		if (!obj)
 438			return mc->nobjs >= min ? 0 : -ENOMEM;
 439		mc->objects[mc->nobjs++] = obj;
 440	}
 441	return 0;
 442}
 443
 444int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 445{
 446	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
 447}
 448
 449int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 450{
 451	return mc->nobjs;
 452}
 453
 454void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 455{
 456	while (mc->nobjs) {
 457		if (mc->kmem_cache)
 458			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
 459		else
 460			free_page((unsigned long)mc->objects[--mc->nobjs]);
 461	}
 462
 463	kvfree(mc->objects);
 464
 465	mc->objects = NULL;
 466	mc->capacity = 0;
 467}
 468
 469void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 470{
 471	void *p;
 472
 473	if (WARN_ON(!mc->nobjs))
 474		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
 475	else
 476		p = mc->objects[--mc->nobjs];
 477	BUG_ON(!p);
 478	return p;
 479}
 480#endif
 481
 482static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 483{
 484	mutex_init(&vcpu->mutex);
 485	vcpu->cpu = -1;
 486	vcpu->kvm = kvm;
 487	vcpu->vcpu_id = id;
 488	vcpu->pid = NULL;
 489#ifndef __KVM_HAVE_ARCH_WQP
 490	rcuwait_init(&vcpu->wait);
 491#endif
 492	kvm_async_pf_vcpu_init(vcpu);
 493
 494	kvm_vcpu_set_in_spin_loop(vcpu, false);
 495	kvm_vcpu_set_dy_eligible(vcpu, false);
 496	vcpu->preempted = false;
 497	vcpu->ready = false;
 498	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 499	vcpu->last_used_slot = NULL;
 500
 501	/* Fill the stats id string for the vcpu */
 502	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
 503		 task_pid_nr(current), id);
 504}
 505
 506static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 507{
 508	kvm_arch_vcpu_destroy(vcpu);
 509	kvm_dirty_ring_free(&vcpu->dirty_ring);
 510
 511	/*
 512	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
 513	 * the vcpu->pid pointer, and at destruction time all file descriptors
 514	 * are already gone.
 515	 */
 516	put_pid(rcu_dereference_protected(vcpu->pid, 1));
 517
 
 518	free_page((unsigned long)vcpu->run);
 519	kmem_cache_free(kvm_vcpu_cache, vcpu);
 
 520}
 
 521
 522void kvm_destroy_vcpus(struct kvm *kvm)
 523{
 524	unsigned long i;
 525	struct kvm_vcpu *vcpu;
 526
 527	kvm_for_each_vcpu(i, vcpu, kvm) {
 528		kvm_vcpu_destroy(vcpu);
 529		xa_erase(&kvm->vcpu_array, i);
 530	}
 531
 532	atomic_set(&kvm->online_vcpus, 0);
 533}
 534EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
 535
 536#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 537static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 538{
 539	return container_of(mn, struct kvm, mmu_notifier);
 540}
 541
 542typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 543
 544typedef void (*on_lock_fn_t)(struct kvm *kvm);
 545
 546struct kvm_mmu_notifier_range {
 547	/*
 548	 * 64-bit addresses, as KVM notifiers can operate on host virtual
 549	 * addresses (unsigned long) and guest physical addresses (64-bit).
 550	 */
 551	u64 start;
 552	u64 end;
 553	union kvm_mmu_notifier_arg arg;
 554	gfn_handler_t handler;
 555	on_lock_fn_t on_lock;
 556	bool flush_on_ret;
 557	bool may_block;
 558};
 559
 560/*
 561 * The inner-most helper returns a tuple containing the return value from the
 562 * arch- and action-specific handler, plus a flag indicating whether or not at
 563 * least one memslot was found, i.e. if the handler found guest memory.
 564 *
 565 * Note, most notifiers are averse to booleans, so even though KVM tracks the
 566 * return from arch code as a bool, outer helpers will cast it to an int. :-(
 567 */
 568typedef struct kvm_mmu_notifier_return {
 569	bool ret;
 570	bool found_memslot;
 571} kvm_mn_ret_t;
 572
 573/*
 574 * Use a dedicated stub instead of NULL to indicate that there is no callback
 575 * function/handler.  The compiler technically can't guarantee that a real
 576 * function will have a non-zero address, and so it will generate code to
 577 * check for !NULL, whereas comparing against a stub will be elided at compile
 578 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 579 */
 580static void kvm_null_fn(void)
 581{
 582
 583}
 584#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
 585
 586static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
 587
 588/* Iterate over each memslot intersecting [start, last] (inclusive) range */
 589#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
 590	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
 591	     node;							     \
 592	     node = interval_tree_iter_next(node, start, last))	     \
 593
 594static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 595							   const struct kvm_mmu_notifier_range *range)
 596{
 597	struct kvm_mmu_notifier_return r = {
 598		.ret = false,
 599		.found_memslot = false,
 600	};
 601	struct kvm_gfn_range gfn_range;
 602	struct kvm_memory_slot *slot;
 603	struct kvm_memslots *slots;
 604	int i, idx;
 605
 606	if (WARN_ON_ONCE(range->end <= range->start))
 607		return r;
 608
 609	/* A null handler is allowed if and only if on_lock() is provided. */
 610	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
 611			 IS_KVM_NULL_FN(range->handler)))
 612		return r;
 613
 614	idx = srcu_read_lock(&kvm->srcu);
 
 615
 616	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 617		struct interval_tree_node *node;
 618
 619		slots = __kvm_memslots(kvm, i);
 620		kvm_for_each_memslot_in_hva_range(node, slots,
 621						  range->start, range->end - 1) {
 622			unsigned long hva_start, hva_end;
 623
 624			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
 625			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
 626			hva_end = min_t(unsigned long, range->end,
 627					slot->userspace_addr + (slot->npages << PAGE_SHIFT));
 628
 629			/*
 630			 * To optimize for the likely case where the address
 631			 * range is covered by zero or one memslots, don't
 632			 * bother making these conditional (to avoid writes on
 633			 * the second or later invocation of the handler).
 634			 */
 635			gfn_range.arg = range->arg;
 636			gfn_range.may_block = range->may_block;
 637
 638			/*
 639			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
 640			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
 641			 */
 642			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
 643			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
 644			gfn_range.slot = slot;
 645
 646			if (!r.found_memslot) {
 647				r.found_memslot = true;
 648				KVM_MMU_LOCK(kvm);
 649				if (!IS_KVM_NULL_FN(range->on_lock))
 650					range->on_lock(kvm);
 651
 652				if (IS_KVM_NULL_FN(range->handler))
 653					break;
 654			}
 655			r.ret |= range->handler(kvm, &gfn_range);
 656		}
 657	}
 658
 659	if (range->flush_on_ret && r.ret)
 660		kvm_flush_remote_tlbs(kvm);
 661
 662	if (r.found_memslot)
 663		KVM_MMU_UNLOCK(kvm);
 664
 665	srcu_read_unlock(&kvm->srcu, idx);
 666
 667	return r;
 668}
 669
 670static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 671						unsigned long start,
 672						unsigned long end,
 673						union kvm_mmu_notifier_arg arg,
 674						gfn_handler_t handler)
 675{
 676	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 677	const struct kvm_mmu_notifier_range range = {
 678		.start		= start,
 679		.end		= end,
 680		.arg		= arg,
 681		.handler	= handler,
 682		.on_lock	= (void *)kvm_null_fn,
 683		.flush_on_ret	= true,
 684		.may_block	= false,
 685	};
 686
 687	return __kvm_handle_hva_range(kvm, &range).ret;
 688}
 689
 690static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
 691							 unsigned long start,
 692							 unsigned long end,
 693							 gfn_handler_t handler)
 694{
 695	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 696	const struct kvm_mmu_notifier_range range = {
 697		.start		= start,
 698		.end		= end,
 699		.handler	= handler,
 700		.on_lock	= (void *)kvm_null_fn,
 701		.flush_on_ret	= false,
 702		.may_block	= false,
 703	};
 704
 705	return __kvm_handle_hva_range(kvm, &range).ret;
 706}
 707
 708static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 709{
 710	/*
711	 * Skipping invalid memslots is correct if and only if change_pte() is
 712	 * surrounded by invalidate_range_{start,end}(), which is currently
 713	 * guaranteed by the primary MMU.  If that ever changes, KVM needs to
 714	 * unmap the memslot instead of skipping the memslot to ensure that KVM
 715	 * doesn't hold references to the old PFN.
 716	 */
 717	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
 718
 719	if (range->slot->flags & KVM_MEMSLOT_INVALID)
 720		return false;
 721
 722	return kvm_set_spte_gfn(kvm, range);
 723}
 724
 725static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 726					struct mm_struct *mm,
 727					unsigned long address,
 728					pte_t pte)
 729{
 730	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 731	const union kvm_mmu_notifier_arg arg = { .pte = pte };
 732
 733	trace_kvm_set_spte_hva(address);
 734
 735	/*
 736	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
 737	 * If mmu_invalidate_in_progress is zero, then no in-progress
 738	 * invalidations, including this one, found a relevant memslot at
 739	 * start(); rechecking memslots here is unnecessary.  Note, a false
 740	 * positive (count elevated by a different invalidation) is sub-optimal
 741	 * but functionally ok.
 742	 */
 743	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
 744	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
 745		return;
 746
 747	kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
 748}
 749
 750void kvm_mmu_invalidate_begin(struct kvm *kvm)
 751{
 752	lockdep_assert_held_write(&kvm->mmu_lock);
 753	/*
 754	 * The count increase must become visible at unlock time as no
 755	 * spte can be established without taking the mmu_lock and
 756	 * count is also read inside the mmu_lock critical section.
 757	 */
 758	kvm->mmu_invalidate_in_progress++;
 759
 760	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
 761		kvm->mmu_invalidate_range_start = INVALID_GPA;
 762		kvm->mmu_invalidate_range_end = INVALID_GPA;
 763	}
 764}
 765
 766void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
 767{
 768	lockdep_assert_held_write(&kvm->mmu_lock);
 769
 770	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
 771
 772	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
 773		kvm->mmu_invalidate_range_start = start;
 774		kvm->mmu_invalidate_range_end = end;
 775	} else {
 776		/*
 777		 * Fully tracking multiple concurrent ranges has diminishing
 778		 * returns. Keep things simple and just find the minimal range
 779		 * which includes the current and new ranges. As there won't be
 780		 * enough information to subtract a range after its invalidate
 781		 * completes, any ranges invalidated concurrently will
 782		 * accumulate and persist until all outstanding invalidates
 783		 * complete.
 784		 */
 785		kvm->mmu_invalidate_range_start =
 786			min(kvm->mmu_invalidate_range_start, start);
 787		kvm->mmu_invalidate_range_end =
 788			max(kvm->mmu_invalidate_range_end, end);
 789	}
 790}
 791
 792bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 793{
 794	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
 795	return kvm_unmap_gfn_range(kvm, range);
 796}
 797
 798static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 799					const struct mmu_notifier_range *range)
 800{
 801	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 802	const struct kvm_mmu_notifier_range hva_range = {
 803		.start		= range->start,
 804		.end		= range->end,
 805		.handler	= kvm_mmu_unmap_gfn_range,
 806		.on_lock	= kvm_mmu_invalidate_begin,
 807		.flush_on_ret	= true,
 808		.may_block	= mmu_notifier_range_blockable(range),
 809	};
 810
 811	trace_kvm_unmap_hva_range(range->start, range->end);
 812
 813	/*
 814	 * Prevent memslot modification between range_start() and range_end()
 815	 * so that conditionally locking provides the same result in both
 816	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
 817	 * adjustments will be imbalanced.
 818	 *
 819	 * Pairs with the decrement in range_end().
 820	 */
 821	spin_lock(&kvm->mn_invalidate_lock);
 822	kvm->mn_active_invalidate_count++;
 823	spin_unlock(&kvm->mn_invalidate_lock);
 824
 825	/*
 826	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
 827	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
 828	 * each cache's lock.  There are relatively few caches in existence at
 829	 * any given time, and the caches themselves can check for hva overlap,
 830	 * i.e. don't need to rely on memslot overlap checks for performance.
 831	 * Because this runs without holding mmu_lock, the pfn caches must use
 832	 * mn_active_invalidate_count (see above) instead of
 833	 * mmu_invalidate_in_progress.
 834	 */
 835	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
 836					  hva_range.may_block);
 837
 838	/*
 839	 * If one or more memslots were found and thus zapped, notify arch code
 840	 * that guest memory has been reclaimed.  This needs to be done *after*
 841	 * dropping mmu_lock, as x86's reclaim path is slooooow.
 842	 */
 843	if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
 844		kvm_arch_guest_memory_reclaimed(kvm);
 845
 846	return 0;
 847}
 848
 849void kvm_mmu_invalidate_end(struct kvm *kvm)
 850{
 851	lockdep_assert_held_write(&kvm->mmu_lock);
 852
 
 853	/*
854	 * Bumping the invalidate sequence count tells the KVM page fault path
855	 * that the page about to be mapped into an SPTE may have been freed in
856	 * the meantime.
 857	 */
 858	kvm->mmu_invalidate_seq++;
 859	smp_wmb();
 860	/*
 861	 * The above sequence increase must be visible before the
 862	 * below count decrease, which is ensured by the smp_wmb above
 863	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
 864	 */
 865	kvm->mmu_invalidate_in_progress--;
 866	KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
 867
 868	/*
 869	 * Assert that at least one range was added between start() and end().
 870	 * Not adding a range isn't fatal, but it is a KVM bug.
 871	 */
 872	WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
 873}
 874
 875static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 876					const struct mmu_notifier_range *range)
 
 877{
 878	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 879	const struct kvm_mmu_notifier_range hva_range = {
 880		.start		= range->start,
 881		.end		= range->end,
 882		.handler	= (void *)kvm_null_fn,
 883		.on_lock	= kvm_mmu_invalidate_end,
 884		.flush_on_ret	= false,
 885		.may_block	= mmu_notifier_range_blockable(range),
 886	};
 887	bool wake;
 888
 889	__kvm_handle_hva_range(kvm, &hva_range);
 
 890
 891	/* Pairs with the increment in range_start(). */
 892	spin_lock(&kvm->mn_invalidate_lock);
 893	wake = (--kvm->mn_active_invalidate_count == 0);
 894	spin_unlock(&kvm->mn_invalidate_lock);
 895
 896	/*
 897	 * There can only be one waiter, since the wait happens under
 898	 * slots_lock.
 899	 */
 900	if (wake)
 901		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
 902}
 903
 904static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 905					      struct mm_struct *mm,
 906					      unsigned long start,
 907					      unsigned long end)
 908{
 909	trace_kvm_age_hva(start, end);
 910
 911	return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
 912				    kvm_age_gfn);
 913}
 914
 915static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 916					struct mm_struct *mm,
 917					unsigned long start,
 918					unsigned long end)
 919{
 920	trace_kvm_age_hva(start, end);
 921
 922	/*
 923	 * Even though we do not flush TLB, this will still adversely
 924	 * affect performance on pre-Haswell Intel EPT, where there is
 925	 * no EPT Access Bit to clear so that we have to tear down EPT
 926	 * tables instead. If we find this unacceptable, we can always
 927	 * add a parameter to kvm_age_hva so that it effectively doesn't
 928	 * do anything on clear_young.
 929	 *
 930	 * Also note that currently we never issue secondary TLB flushes
 931	 * from clear_young, leaving this job up to the regular system
 932	 * cadence. If we find this inaccurate, we might come up with a
 933	 * more sophisticated heuristic later.
 934	 */
 935	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
 936}
 937
 938static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 939				       struct mm_struct *mm,
 940				       unsigned long address)
 941{
 942	trace_kvm_test_age_hva(address);
 943
 944	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
 945					     kvm_test_age_gfn);
 946}
 947
 948static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 949				     struct mm_struct *mm)
 950{
 951	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 952	int idx;
 953
 954	idx = srcu_read_lock(&kvm->srcu);
 955	kvm_flush_shadow_all(kvm);
 956	srcu_read_unlock(&kvm->srcu, idx);
 957}
 958
 959static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 960	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 961	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 962	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
 963	.clear_young		= kvm_mmu_notifier_clear_young,
 964	.test_young		= kvm_mmu_notifier_test_young,
 965	.change_pte		= kvm_mmu_notifier_change_pte,
 966	.release		= kvm_mmu_notifier_release,
 967};
 968
 969static int kvm_init_mmu_notifier(struct kvm *kvm)
 970{
 971	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
 972	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 973}
 974
 975#else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
 976
 977static int kvm_init_mmu_notifier(struct kvm *kvm)
 978{
 979	return 0;
 980}
 981
 982#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
 983
 984#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 985static int kvm_pm_notifier_call(struct notifier_block *bl,
 986				unsigned long state,
 987				void *unused)
 988{
 989	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
 990
 991	return kvm_arch_pm_notifier(kvm, state);
 992}
 993
 994static void kvm_init_pm_notifier(struct kvm *kvm)
 995{
 996	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
 997	/* Suspend KVM before we suspend ftrace, RCU, etc. */
 998	kvm->pm_notifier.priority = INT_MAX;
 999	register_pm_notifier(&kvm->pm_notifier);
1000}
1001
1002static void kvm_destroy_pm_notifier(struct kvm *kvm)
1003{
1004	unregister_pm_notifier(&kvm->pm_notifier);
1005}
1006#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
1007static void kvm_init_pm_notifier(struct kvm *kvm)
1008{
1009}
1010
1011static void kvm_destroy_pm_notifier(struct kvm *kvm)
1012{
1013}
1014#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
1015
1016static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
1017{
1018	if (!memslot->dirty_bitmap)
1019		return;
1020
1021	kvfree(memslot->dirty_bitmap);
1022	memslot->dirty_bitmap = NULL;
1023}
1024
1025/* This does not remove the slot from struct kvm_memslots data structures */
1026static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
1027{
1028	if (slot->flags & KVM_MEM_GUEST_MEMFD)
1029		kvm_gmem_unbind(slot);
1030
1031	kvm_destroy_dirty_bitmap(slot);
1032
1033	kvm_arch_free_memslot(kvm, slot);
1034
1035	kfree(slot);
1036}
1037
1038static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
1039{
1040	struct hlist_node *idnode;
1041	struct kvm_memory_slot *memslot;
1042	int bkt;
1043
1044	/*
1045	 * The same memslot objects live in both the active and inactive sets;
1046	 * arbitrarily free them using index '1' so the second invocation of this
1047	 * function isn't operating over a structure with dangling pointers
1048	 * (even though this function isn't actually touching them).
1049	 */
1050	if (!slots->node_idx)
1051		return;
1052
1053	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1054		kvm_free_memslot(kvm, memslot);
1055}
1056
1057static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
1058{
1059	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
1060	case KVM_STATS_TYPE_INSTANT:
1061		return 0444;
1062	case KVM_STATS_TYPE_CUMULATIVE:
1063	case KVM_STATS_TYPE_PEAK:
1064	default:
1065		return 0644;
1066	}
1067}
1068
1069
1070static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1071{
1072	int i;
1073	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1074				      kvm_vcpu_stats_header.num_desc;
1075
1076	if (IS_ERR(kvm->debugfs_dentry))
1077		return;
1078
1079	debugfs_remove_recursive(kvm->debugfs_dentry);
1080
1081	if (kvm->debugfs_stat_data) {
1082		for (i = 0; i < kvm_debugfs_num_entries; i++)
1083			kfree(kvm->debugfs_stat_data[i]);
1084		kfree(kvm->debugfs_stat_data);
1085	}
1086}
1087
1088static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1089{
1090	static DEFINE_MUTEX(kvm_debugfs_lock);
1091	struct dentry *dent;
1092	char dir_name[ITOA_MAX_LEN * 2];
1093	struct kvm_stat_data *stat_data;
1094	const struct _kvm_stats_desc *pdesc;
1095	int i, ret = -ENOMEM;
1096	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1097				      kvm_vcpu_stats_header.num_desc;
1098
1099	if (!debugfs_initialized())
1100		return 0;
1101
1102	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1103	mutex_lock(&kvm_debugfs_lock);
1104	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1105	if (dent) {
1106		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1107		dput(dent);
1108		mutex_unlock(&kvm_debugfs_lock);
1109		return 0;
1110	}
1111	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1112	mutex_unlock(&kvm_debugfs_lock);
1113	if (IS_ERR(dent))
1114		return 0;
1115
1116	kvm->debugfs_dentry = dent;
1117	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1118					 sizeof(*kvm->debugfs_stat_data),
1119					 GFP_KERNEL_ACCOUNT);
1120	if (!kvm->debugfs_stat_data)
1121		goto out_err;
1122
1123	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1124		pdesc = &kvm_vm_stats_desc[i];
1125		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1126		if (!stat_data)
1127			goto out_err;
1128
1129		stat_data->kvm = kvm;
1130		stat_data->desc = pdesc;
1131		stat_data->kind = KVM_STAT_VM;
1132		kvm->debugfs_stat_data[i] = stat_data;
1133		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1134				    kvm->debugfs_dentry, stat_data,
1135				    &stat_fops_per_vm);
1136	}
1137
1138	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1139		pdesc = &kvm_vcpu_stats_desc[i];
1140		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1141		if (!stat_data)
1142			goto out_err;
1143
1144		stat_data->kvm = kvm;
1145		stat_data->desc = pdesc;
1146		stat_data->kind = KVM_STAT_VCPU;
1147		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1148		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1149				    kvm->debugfs_dentry, stat_data,
1150				    &stat_fops_per_vm);
1151	}
1152
1153	ret = kvm_arch_create_vm_debugfs(kvm);
1154	if (ret)
1155		goto out_err;
1156
1157	return 0;
1158out_err:
1159	kvm_destroy_vm_debugfs(kvm);
1160	return ret;
1161}
1162
1163/*
1164 * Called after the VM is otherwise initialized, but just before adding it to
1165 * the vm_list.
1166 */
1167int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1168{
1169	return 0;
1170}
1171
1172/*
1173 * Called just after removing the VM from the vm_list, but before doing any
1174 * other destruction.
1175 */
1176void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1177{
1178}
1179
1180/*
1181 * Called after the per-VM debugfs directory is created.  When called,
1182 * kvm->debugfs_dentry should already be set up, so arch-specific debugfs
1183 * entries can be created under it.  Cleanup happens automatically and
1184 * recursively in kvm_destroy_vm_debugfs(), so no per-arch destroy hook is needed.
1185 */
1186int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1187{
1188	return 0;
1189}
1190
1191static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1192{
1193	struct kvm *kvm = kvm_arch_alloc_vm();
1194	struct kvm_memslots *slots;
1195	int r = -ENOMEM;
1196	int i, j;
1197
1198	if (!kvm)
1199		return ERR_PTR(-ENOMEM);
1200
1201	KVM_MMU_LOCK_INIT(kvm);
1202	mmgrab(current->mm);
1203	kvm->mm = current->mm;
1204	kvm_eventfd_init(kvm);
1205	mutex_init(&kvm->lock);
1206	mutex_init(&kvm->irq_lock);
1207	mutex_init(&kvm->slots_lock);
1208	mutex_init(&kvm->slots_arch_lock);
1209	spin_lock_init(&kvm->mn_invalidate_lock);
1210	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1211	xa_init(&kvm->vcpu_array);
1212#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1213	xa_init(&kvm->mem_attr_array);
1214#endif
1215
1216	INIT_LIST_HEAD(&kvm->gpc_list);
1217	spin_lock_init(&kvm->gpc_lock);
1218
1219	INIT_LIST_HEAD(&kvm->devices);
1220	kvm->max_vcpus = KVM_MAX_VCPUS;
1221
1222	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1223
1224	/*
1225	 * Force subsequent debugfs file creations to fail if the VM directory
1226	 * is not created (by kvm_create_vm_debugfs()).
1227	 */
1228	kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1229
1230	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1231		 task_pid_nr(current));
1232
1233	if (init_srcu_struct(&kvm->srcu))
1234		goto out_err_no_srcu;
1235	if (init_srcu_struct(&kvm->irq_srcu))
1236		goto out_err_no_irq_srcu;
1237
1238	refcount_set(&kvm->users_count, 1);
1239	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1240		for (j = 0; j < 2; j++) {
1241			slots = &kvm->__memslots[i][j];
1242
1243			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1244			slots->hva_tree = RB_ROOT_CACHED;
1245			slots->gfn_tree = RB_ROOT;
1246			hash_init(slots->id_hash);
1247			slots->node_idx = j;
1248
1249			/* Generations must be different for each address space. */
1250			slots->generation = i;
1251		}
1252
1253		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1254	}
1255
1256	for (i = 0; i < KVM_NR_BUSES; i++) {
1257		rcu_assign_pointer(kvm->buses[i],
1258			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1259		if (!kvm->buses[i])
1260			goto out_err_no_arch_destroy_vm;
1261	}
1262
1263	r = kvm_arch_init_vm(kvm, type);
1264	if (r)
1265		goto out_err_no_arch_destroy_vm;
1266
1267	r = hardware_enable_all();
1268	if (r)
1269		goto out_err_no_disable;
1270
1271#ifdef CONFIG_HAVE_KVM_IRQCHIP
1272	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1273#endif
1274
1275	r = kvm_init_mmu_notifier(kvm);
1276	if (r)
1277		goto out_err_no_mmu_notifier;
1278
1279	r = kvm_coalesced_mmio_init(kvm);
1280	if (r < 0)
1281		goto out_no_coalesced_mmio;
1282
1283	r = kvm_create_vm_debugfs(kvm, fdname);
1284	if (r)
1285		goto out_err_no_debugfs;
1286
1287	r = kvm_arch_post_init_vm(kvm);
1288	if (r)
1289		goto out_err;
1290
1291	mutex_lock(&kvm_lock);
1292	list_add(&kvm->vm_list, &vm_list);
1293	mutex_unlock(&kvm_lock);
1294
1295	preempt_notifier_inc();
1296	kvm_init_pm_notifier(kvm);
1297
1298	return kvm;
1299
1300out_err:
1301	kvm_destroy_vm_debugfs(kvm);
1302out_err_no_debugfs:
1303	kvm_coalesced_mmio_free(kvm);
1304out_no_coalesced_mmio:
1305#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1306	if (kvm->mmu_notifier.ops)
1307		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1308#endif
1309out_err_no_mmu_notifier:
1310	hardware_disable_all();
1311out_err_no_disable:
1312	kvm_arch_destroy_vm(kvm);
1313out_err_no_arch_destroy_vm:
1314	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1315	for (i = 0; i < KVM_NR_BUSES; i++)
1316		kfree(kvm_get_bus(kvm, i));
1317	cleanup_srcu_struct(&kvm->irq_srcu);
1318out_err_no_irq_srcu:
1319	cleanup_srcu_struct(&kvm->srcu);
1320out_err_no_srcu:
1321	kvm_arch_free_vm(kvm);
1322	mmdrop(current->mm);
1323	return ERR_PTR(r);
1324}
1325
1326static void kvm_destroy_devices(struct kvm *kvm)
1327{
1328	struct kvm_device *dev, *tmp;
1329
1330	/*
1331	 * We do not need to take the kvm->lock here, because nobody else
1332	 * has a reference to the struct kvm at this point and therefore
1333	 * cannot access the devices list anyhow.
1334	 */
1335	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1336		list_del(&dev->vm_node);
1337		dev->ops->destroy(dev);
1338	}
1339}
1340
1341static void kvm_destroy_vm(struct kvm *kvm)
1342{
1343	int i;
1344	struct mm_struct *mm = kvm->mm;
1345
1346	kvm_destroy_pm_notifier(kvm);
1347	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1348	kvm_destroy_vm_debugfs(kvm);
1349	kvm_arch_sync_events(kvm);
1350	mutex_lock(&kvm_lock);
1351	list_del(&kvm->vm_list);
1352	mutex_unlock(&kvm_lock);
1353	kvm_arch_pre_destroy_vm(kvm);
1354
1355	kvm_free_irq_routing(kvm);
1356	for (i = 0; i < KVM_NR_BUSES; i++) {
1357		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1358
1359		if (bus)
1360			kvm_io_bus_destroy(bus);
1361		kvm->buses[i] = NULL;
1362	}
1363	kvm_coalesced_mmio_free(kvm);
1364#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1365	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1366	/*
1367	 * At this point, pending calls to invalidate_range_start()
1368	 * have completed but no more MMU notifiers will run, so
1369	 * mn_active_invalidate_count may remain unbalanced.
1370	 * No threads can be waiting in kvm_swap_active_memslots() as the
1371	 * last reference on KVM has been dropped, but freeing
1372	 * memslots would deadlock without this manual intervention.
1373	 *
1374	 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1375	 * notifier between a start() and end(), then there shouldn't be any
1376	 * in-progress invalidations.
1377	 */
1378	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1379	if (kvm->mn_active_invalidate_count)
1380		kvm->mn_active_invalidate_count = 0;
1381	else
1382		WARN_ON(kvm->mmu_invalidate_in_progress);
1383#else
1384	kvm_flush_shadow_all(kvm);
1385#endif
1386	kvm_arch_destroy_vm(kvm);
1387	kvm_destroy_devices(kvm);
1388	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1389		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1390		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1391	}
1392	cleanup_srcu_struct(&kvm->irq_srcu);
1393	cleanup_srcu_struct(&kvm->srcu);
1394#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1395	xa_destroy(&kvm->mem_attr_array);
1396#endif
1397	kvm_arch_free_vm(kvm);
1398	preempt_notifier_dec();
1399	hardware_disable_all();
1400	mmdrop(mm);
1401}
1402
1403void kvm_get_kvm(struct kvm *kvm)
1404{
1405	refcount_inc(&kvm->users_count);
1406}
1407EXPORT_SYMBOL_GPL(kvm_get_kvm);
1408
1409/*
1410 * Make sure the VM is not being destroyed; this is a safe version of
1411 * kvm_get_kvm().  Returns true if kvm was referenced successfully, false otherwise.
1412 */
1413bool kvm_get_kvm_safe(struct kvm *kvm)
1414{
1415	return refcount_inc_not_zero(&kvm->users_count);
1416}
1417EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
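
/*
 * Illustrative sketch (not part of the original file): kvm_get_kvm_safe() is
 * meant for contexts that hold a kvm pointer without owning a reference of
 * their own, e.g. a stats or debugfs reader racing with VM teardown:
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return -ENOENT;		// VM is already being destroyed
 *
 *	...use kvm...
 *
 *	kvm_put_kvm(kvm);
 */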
1418
1419void kvm_put_kvm(struct kvm *kvm)
1420{
1421	if (refcount_dec_and_test(&kvm->users_count))
1422		kvm_destroy_vm(kvm);
1423}
1424EXPORT_SYMBOL_GPL(kvm_put_kvm);
1425
1426/*
1427 * Used to put a reference that was taken on behalf of an object associated
1428 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1429 * of the new file descriptor fails and the reference cannot be transferred to
1430 * its final owner.  In such cases, the caller is still actively using @kvm and
1431 * will fail miserably if the refcount unexpectedly hits zero.
1432 */
1433void kvm_put_kvm_no_destroy(struct kvm *kvm)
1434{
1435	WARN_ON(refcount_dec_and_test(&kvm->users_count));
1436}
1437EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1438
1439static int kvm_vm_release(struct inode *inode, struct file *filp)
1440{
1441	struct kvm *kvm = filp->private_data;
1442
1443	kvm_irqfd_release(kvm);
1444
1445	kvm_put_kvm(kvm);
1446	return 0;
1447}
1448
1449/*
1450 * Allocation size is twice as large as the actual dirty bitmap size.
1451 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
1452 */
1453static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1454{
1455	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1456
1457	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1458	if (!memslot->dirty_bitmap)
1459		return -ENOMEM;
1460
1461	return 0;
1462}
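
/*
 * Illustrative sketch (not part of the original file): the second half of the
 * allocation acts as a snapshot buffer so the live bitmap can be fetched and
 * cleared while the copy handed to userspace stays stable.  A helper
 * equivalent to kvm_second_dirty_bitmap() would simply point at that second
 * half:
 *
 *	static unsigned long *second_half(struct kvm_memory_slot *memslot)
 *	{
 *		unsigned long len = kvm_dirty_bitmap_bytes(memslot);
 *
 *		return memslot->dirty_bitmap + len / sizeof(unsigned long);
 *	}
 */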
1463
1464static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1465{
1466	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1467	int node_idx_inactive = active->node_idx ^ 1;
1468
1469	return &kvm->__memslots[as_id][node_idx_inactive];
1470}
1471
1472/*
1473 * Helper to get the address space ID when one of the memslot pointers may be NULL.
1474 * This also serves as a sanity check that at least one of the pointers is non-NULL,
1475 * and that their address space IDs don't diverge.
1476 */
1477static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1478				  struct kvm_memory_slot *b)
1479{
1480	if (WARN_ON_ONCE(!a && !b))
1481		return 0;
1482
1483	if (!a)
1484		return b->as_id;
1485	if (!b)
1486		return a->as_id;
1487
1488	WARN_ON_ONCE(a->as_id != b->as_id);
1489	return a->as_id;
1490}
1491
1492static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1493				struct kvm_memory_slot *slot)
1494{
1495	struct rb_root *gfn_tree = &slots->gfn_tree;
1496	struct rb_node **node, *parent;
1497	int idx = slots->node_idx;
1498
1499	parent = NULL;
1500	for (node = &gfn_tree->rb_node; *node; ) {
1501		struct kvm_memory_slot *tmp;
1502
1503		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1504		parent = *node;
1505		if (slot->base_gfn < tmp->base_gfn)
1506			node = &(*node)->rb_left;
1507		else if (slot->base_gfn > tmp->base_gfn)
1508			node = &(*node)->rb_right;
1509		else
1510			BUG();
1511	}
1512
1513	rb_link_node(&slot->gfn_node[idx], parent, node);
1514	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1515}
1516
1517static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1518			       struct kvm_memory_slot *slot)
1519{
1520	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1521}
1522
1523static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1524				 struct kvm_memory_slot *old,
1525				 struct kvm_memory_slot *new)
1526{
1527	int idx = slots->node_idx;
1528
1529	WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1530
1531	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1532			&slots->gfn_tree);
1533}
1534
1535/*
1536 * Replace @old with @new in the inactive memslots.
1537 *
1538 * With NULL @old this simply adds @new.
1539 * With NULL @new this simply removes @old.
1540 *
1541 * If @new is non-NULL its hva_node[slots_idx] range has to be set
1542 * appropriately.
1543 */
1544static void kvm_replace_memslot(struct kvm *kvm,
1545				struct kvm_memory_slot *old,
1546				struct kvm_memory_slot *new)
1547{
1548	int as_id = kvm_memslots_get_as_id(old, new);
1549	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1550	int idx = slots->node_idx;
1551
1552	if (old) {
1553		hash_del(&old->id_node[idx]);
1554		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1555
1556		if ((long)old == atomic_long_read(&slots->last_used_slot))
1557			atomic_long_set(&slots->last_used_slot, (long)new);
1558
1559		if (!new) {
1560			kvm_erase_gfn_node(slots, old);
1561			return;
1562		}
1563	}
1564
1565	/*
1566	 * Initialize @new's hva range.  Do this even when replacing an @old
1567	 * slot, kvm_copy_memslot() deliberately does not touch node data.
1568	 */
1569	new->hva_node[idx].start = new->userspace_addr;
1570	new->hva_node[idx].last = new->userspace_addr +
1571				  (new->npages << PAGE_SHIFT) - 1;
1572
1573	/*
1574	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(), so
1575	 * hva_node needs to be swapped with remove+insert even though the hva can't
1576	 * change when replacing an existing slot.
1577	 */
1578	hash_add(slots->id_hash, &new->id_node[idx], new->id);
1579	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1580
1581	/*
1582	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1583	 * switch the node in the gfn tree instead of removing the old and
1584	 * inserting the new as two separate operations. Replacement is a
1585	 * single O(1) operation versus two O(log(n)) operations for
1586	 * remove+insert.
1587	 */
1588	if (old && old->base_gfn == new->base_gfn) {
1589		kvm_replace_gfn_node(slots, old, new);
1590	} else {
1591		if (old)
1592			kvm_erase_gfn_node(slots, old);
1593		kvm_insert_gfn_node(slots, new);
1594	}
1595}
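
/*
 * Illustrative sketch (not part of the original file): each memslot carries
 * two sets of hash/tree nodes so the same struct kvm_memory_slot can be
 * linked into both the active and the inactive set without reallocation.
 * A lookup in a given set always indexes the per-slot node arrays with that
 * set's node_idx, e.g. an id lookup along the lines of id_to_memslot():
 *
 *	int idx = slots->node_idx;
 *	struct kvm_memory_slot *slot;
 *
 *	hash_for_each_possible(slots->id_hash, slot, id_node[idx], id)
 *		if (slot->id == id)
 *			return slot;
 *	return NULL;
 */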
1596
1597/*
1598 * Flags that do not access any of the extra space of struct
1599 * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1600 * only allows these.
1601 */
1602#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1603	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1604
1605static int check_memory_region_flags(struct kvm *kvm,
1606				     const struct kvm_userspace_memory_region2 *mem)
1607{
1608	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1609
1610	if (kvm_arch_has_private_mem(kvm))
1611		valid_flags |= KVM_MEM_GUEST_MEMFD;
1612
1613	/* Dirty logging private memory is not currently supported. */
1614	if (mem->flags & KVM_MEM_GUEST_MEMFD)
1615		valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1616
1617#ifdef __KVM_HAVE_READONLY_MEM
1618	/*
1619	 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1620	 * read-only memslots have emulated MMIO, not page fault, semantics,
1621	 * and KVM doesn't allow emulated MMIO for private memory.
1622	 */
1623	if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
1624		valid_flags |= KVM_MEM_READONLY;
1625#endif
1626
1627	if (mem->flags & ~valid_flags)
1628		return -EINVAL;
1629
1630	return 0;
1631}
1632
1633static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1634{
1635	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1636
1637	/* Grab the generation from the active memslots. */
1638	u64 gen = __kvm_memslots(kvm, as_id)->generation;
1639
1640	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1641	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1642
1643	/*
1644	 * Do not store the new memslots while there are invalidations in
1645	 * progress, otherwise the locking in invalidate_range_start and
1646	 * invalidate_range_end will be unbalanced.
1647	 */
1648	spin_lock(&kvm->mn_invalidate_lock);
1649	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1650	while (kvm->mn_active_invalidate_count) {
1651		set_current_state(TASK_UNINTERRUPTIBLE);
1652		spin_unlock(&kvm->mn_invalidate_lock);
1653		schedule();
1654		spin_lock(&kvm->mn_invalidate_lock);
1655	}
1656	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1657	rcu_assign_pointer(kvm->memslots[as_id], slots);
1658	spin_unlock(&kvm->mn_invalidate_lock);
1659
1660	/*
1661	 * Acquired in kvm_set_memslot().  Must be released before the SRCU
1662	 * synchronization below in order to avoid deadlock with another thread
1663	 * acquiring the slots_arch_lock in an srcu critical section.
1664	 */
1665	mutex_unlock(&kvm->slots_arch_lock);
1666
1667	synchronize_srcu_expedited(&kvm->srcu);
1668
1669	/*
1670	 * Increment the new memslot generation a second time, dropping the
1671	 * update in-progress flag and incrementing the generation based on
1672	 * the number of address spaces.  This provides a unique and easily
1673	 * identifiable generation number while the memslots are in flux.
1674	 */
1675	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1676
1677	/*
1678	 * Generations must be unique even across address spaces.  We do not need
1679	 * a global counter for that; instead, the generation space is evenly split
1680	 * across address spaces.  For example, with two address spaces, address
1681	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1682	 * use generations 1, 3, 5, ...
1683	 */
1684	gen += kvm_arch_nr_memslot_as_ids(kvm);
1685
1686	kvm_arch_memslots_updated(kvm, gen);
1687
1688	slots->generation = gen;
1689}
1690
1691static int kvm_prepare_memory_region(struct kvm *kvm,
1692				     const struct kvm_memory_slot *old,
1693				     struct kvm_memory_slot *new,
1694				     enum kvm_mr_change change)
1695{
1696	int r;
1697
1698	/*
1699	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1700	 * will be freed on "commit".  If logging is enabled in both old and
1701	 * new, reuse the existing bitmap.  If logging is enabled only in the
1702	 * new and KVM isn't using a ring buffer, allocate and initialize a
1703	 * new bitmap.
1704	 */
1705	if (change != KVM_MR_DELETE) {
1706		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1707			new->dirty_bitmap = NULL;
1708		else if (old && old->dirty_bitmap)
1709			new->dirty_bitmap = old->dirty_bitmap;
1710		else if (kvm_use_dirty_bitmap(kvm)) {
1711			r = kvm_alloc_dirty_bitmap(new);
1712			if (r)
1713				return r;
1714
1715			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1716				bitmap_set(new->dirty_bitmap, 0, new->npages);
1717		}
1718	}
1719
1720	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1721
1722	/* Free the bitmap on failure if it was allocated above. */
1723	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1724		kvm_destroy_dirty_bitmap(new);
1725
1726	return r;
1727}
1728
1729static void kvm_commit_memory_region(struct kvm *kvm,
1730				     struct kvm_memory_slot *old,
1731				     const struct kvm_memory_slot *new,
1732				     enum kvm_mr_change change)
1733{
1734	int old_flags = old ? old->flags : 0;
1735	int new_flags = new ? new->flags : 0;
1736	/*
1737	 * Update the total number of memslot pages before calling the arch
1738	 * hook so that architectures can consume the result directly.
1739	 */
1740	if (change == KVM_MR_DELETE)
1741		kvm->nr_memslot_pages -= old->npages;
1742	else if (change == KVM_MR_CREATE)
1743		kvm->nr_memslot_pages += new->npages;
1744
1745	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1746		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1747		atomic_set(&kvm->nr_memslots_dirty_logging,
1748			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1749	}
1750
1751	kvm_arch_commit_memory_region(kvm, old, new, change);
1752
1753	switch (change) {
1754	case KVM_MR_CREATE:
1755		/* Nothing more to do. */
1756		break;
1757	case KVM_MR_DELETE:
1758		/* Free the old memslot and all its metadata. */
1759		kvm_free_memslot(kvm, old);
1760		break;
1761	case KVM_MR_MOVE:
1762	case KVM_MR_FLAGS_ONLY:
1763		/*
1764		 * Free the dirty bitmap as needed; the check below encompasses
1765		 * both the flags and whether a dirty ring buffer is being used.
1766		 */
1767		if (old->dirty_bitmap && !new->dirty_bitmap)
1768			kvm_destroy_dirty_bitmap(old);
1769
1770		/*
1771		 * The final quirk.  Free the detached, old slot, but only its
1772		 * memory, not any metadata.  Metadata, including arch specific
1773		 * data, may be reused by @new.
1774		 */
1775		kfree(old);
1776		break;
1777	default:
1778		BUG();
1779	}
1780}
1781
1782/*
1783 * Activate @new, which must be installed in the inactive slots by the caller,
1784 * by swapping the active slots and then propagating @new to @old once @old is
1785 * unreachable and can be safely modified.
1786 *
1787 * With NULL @old this simply adds @new to @active (while swapping the sets).
1788 * With NULL @new this simply removes @old from @active and frees it
1789 * (while also swapping the sets).
1790 */
1791static void kvm_activate_memslot(struct kvm *kvm,
1792				 struct kvm_memory_slot *old,
1793				 struct kvm_memory_slot *new)
1794{
1795	int as_id = kvm_memslots_get_as_id(old, new);
1796
1797	kvm_swap_active_memslots(kvm, as_id);
1798
1799	/* Propagate the new memslot to the now inactive memslots. */
1800	kvm_replace_memslot(kvm, old, new);
1801}
1802
1803static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1804			     const struct kvm_memory_slot *src)
1805{
1806	dest->base_gfn = src->base_gfn;
1807	dest->npages = src->npages;
1808	dest->dirty_bitmap = src->dirty_bitmap;
1809	dest->arch = src->arch;
1810	dest->userspace_addr = src->userspace_addr;
1811	dest->flags = src->flags;
1812	dest->id = src->id;
1813	dest->as_id = src->as_id;
1814}
1815
1816static void kvm_invalidate_memslot(struct kvm *kvm,
1817				   struct kvm_memory_slot *old,
1818				   struct kvm_memory_slot *invalid_slot)
1819{
1820	/*
1821	 * Mark the current slot INVALID.  As with all memslot modifications,
1822	 * this must be done on an unreachable slot to avoid modifying the
1823	 * current slot in the active tree.
1824	 */
1825	kvm_copy_memslot(invalid_slot, old);
1826	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1827	kvm_replace_memslot(kvm, old, invalid_slot);
1828
1829	/*
1830	 * Activate the slot that is now marked INVALID, but don't propagate
1831	 * the slot to the now inactive slots. The slot is either going to be
1832	 * deleted or recreated as a new slot.
1833	 */
1834	kvm_swap_active_memslots(kvm, old->as_id);
1835
1836	/*
1837	 * From this point no new shadow pages pointing to a deleted, or moved,
1838	 * memslot will be created.  Validation of sp->gfn happens in:
1839	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1840	 *	- kvm_is_visible_gfn (mmu_check_root)
1841	 */
1842	kvm_arch_flush_shadow_memslot(kvm, old);
1843	kvm_arch_guest_memory_reclaimed(kvm);
1844
1845	/* Was released by kvm_swap_active_memslots(), reacquire. */
1846	mutex_lock(&kvm->slots_arch_lock);
1847
1848	/*
1849	 * Copy the arch-specific field of the newly-installed slot back to the
1850	 * old slot as the arch data could have changed between releasing
1851	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1852	 * above.  Writers are required to retrieve memslots *after* acquiring
1853	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1854	 */
1855	old->arch = invalid_slot->arch;
1856}
1857
1858static void kvm_create_memslot(struct kvm *kvm,
1859			       struct kvm_memory_slot *new)
1860{
1861	/* Add the new memslot to the inactive set and activate. */
1862	kvm_replace_memslot(kvm, NULL, new);
1863	kvm_activate_memslot(kvm, NULL, new);
1864}
1865
1866static void kvm_delete_memslot(struct kvm *kvm,
1867			       struct kvm_memory_slot *old,
1868			       struct kvm_memory_slot *invalid_slot)
1869{
1870	/*
1871	 * Remove the old memslot (in the inactive memslots) by passing NULL as
1872	 * the "new" slot, and do the same for the invalid version in the active slots.
1873	 */
1874	kvm_replace_memslot(kvm, old, NULL);
1875	kvm_activate_memslot(kvm, invalid_slot, NULL);
1876}
1877
1878static void kvm_move_memslot(struct kvm *kvm,
1879			     struct kvm_memory_slot *old,
1880			     struct kvm_memory_slot *new,
1881			     struct kvm_memory_slot *invalid_slot)
1882{
1883	/*
1884	 * Replace the old memslot in the inactive slots, and then swap slots
1885	 * and replace the current INVALID with the new as well.
1886	 */
1887	kvm_replace_memslot(kvm, old, new);
1888	kvm_activate_memslot(kvm, invalid_slot, new);
1889}
1890
1891static void kvm_update_flags_memslot(struct kvm *kvm,
1892				     struct kvm_memory_slot *old,
1893				     struct kvm_memory_slot *new)
1894{
1895	/*
1896	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1897	 * an intermediate step. Instead, the old memslot is simply replaced
1898	 * with a new, updated copy in both memslot sets.
1899	 */
1900	kvm_replace_memslot(kvm, old, new);
1901	kvm_activate_memslot(kvm, old, new);
1902}
1903
1904static int kvm_set_memslot(struct kvm *kvm,
1905			   struct kvm_memory_slot *old,
1906			   struct kvm_memory_slot *new,
1907			   enum kvm_mr_change change)
1908{
1909	struct kvm_memory_slot *invalid_slot;
1910	int r;
1911
1912	/*
1913	 * Released in kvm_swap_active_memslots().
1914	 *
1915	 * Must be held from before the current memslots are copied until after
1916	 * the new memslots are installed with rcu_assign_pointer, then
1917	 * released before the synchronize srcu in kvm_swap_active_memslots().
1918	 *
1919	 * When modifying memslots outside of the slots_lock, must be held
1920	 * before reading the pointer to the current memslots until after all
1921	 * changes to those memslots are complete.
1922	 *
1923	 * These rules ensure that installing new memslots does not lose
1924	 * changes made to the previous memslots.
1925	 */
1926	mutex_lock(&kvm->slots_arch_lock);
1927
1928	/*
1929	 * Invalidate the old slot if it's being deleted or moved.  This is
1930	 * done prior to actually deleting/moving the memslot to allow vCPUs to
1931	 * continue running by ensuring there are no mappings or shadow pages
1932	 * for the memslot when it is deleted/moved.  Without pre-invalidation
1933	 * (and without a lock), a window would exist between effecting the
1934	 * delete/move and committing the changes in arch code where KVM or a
1935	 * guest could access a non-existent memslot.
1936	 *
1937	 * Modifications are done on a temporary, unreachable slot.  The old
1938	 * slot needs to be preserved in case a later step fails and the
1939	 * invalidation needs to be reverted.
1940	 */
1941	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1942		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1943		if (!invalid_slot) {
1944			mutex_unlock(&kvm->slots_arch_lock);
1945			return -ENOMEM;
1946		}
1947		kvm_invalidate_memslot(kvm, old, invalid_slot);
1948	}
1949
1950	r = kvm_prepare_memory_region(kvm, old, new, change);
1951	if (r) {
1952		/*
1953		 * For DELETE/MOVE, revert the above INVALID change.  No
1954		 * modifications required since the original slot was preserved
1955		 * in the inactive slots.  Changing the active memslots also
1956		 * releases slots_arch_lock.
1957		 */
1958		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1959			kvm_activate_memslot(kvm, invalid_slot, old);
1960			kfree(invalid_slot);
1961		} else {
1962			mutex_unlock(&kvm->slots_arch_lock);
1963		}
1964		return r;
1965	}
1966
1967	/*
1968	 * For DELETE and MOVE, the working slot is now active as the INVALID
1969	 * version of the old slot.  MOVE is particularly special as it reuses
1970	 * the old slot and returns a copy of the old slot (in working_slot).
1971	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1972	 * old slot is detached but otherwise preserved.
1973	 */
1974	if (change == KVM_MR_CREATE)
1975		kvm_create_memslot(kvm, new);
1976	else if (change == KVM_MR_DELETE)
1977		kvm_delete_memslot(kvm, old, invalid_slot);
1978	else if (change == KVM_MR_MOVE)
1979		kvm_move_memslot(kvm, old, new, invalid_slot);
1980	else if (change == KVM_MR_FLAGS_ONLY)
1981		kvm_update_flags_memslot(kvm, old, new);
1982	else
1983		BUG();
1984
1985	/* Free the temporary INVALID slot used for DELETE and MOVE. */
1986	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1987		kfree(invalid_slot);
1988
1989	/*
1990	 * No need to refresh new->arch, changes after dropping slots_arch_lock
1991	 * will directly hit the final, active memslot.  Architectures are
1992	 * responsible for knowing that new->arch may be stale.
1993	 */
1994	kvm_commit_memory_region(kvm, old, new, change);
1995
1996	return 0;
1997}
1998
1999static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
2000				      gfn_t start, gfn_t end)
2001{
2002	struct kvm_memslot_iter iter;
2003
2004	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
2005		if (iter.slot->id != id)
2006			return true;
2007	}
2008
2009	return false;
2010}
2011
2012/*
2013 * Allocate some memory and give it an address in the guest physical address
2014 * space.
2015 *
2016 * Discontiguous memory is allowed, mostly for framebuffers.
2017 *
2018 * Must be called holding kvm->slots_lock for write.
2019 */
2020int __kvm_set_memory_region(struct kvm *kvm,
2021			    const struct kvm_userspace_memory_region2 *mem)
2022{
2023	struct kvm_memory_slot *old, *new;
2024	struct kvm_memslots *slots;
2025	enum kvm_mr_change change;
2026	unsigned long npages;
2027	gfn_t base_gfn;
2028	int as_id, id;
2029	int r;
2030
2031	r = check_memory_region_flags(kvm, mem);
2032	if (r)
2033		return r;
2034
2035	as_id = mem->slot >> 16;
2036	id = (u16)mem->slot;
2037
2038	/* General sanity checks */
2039	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2040	    (mem->memory_size != (unsigned long)mem->memory_size))
2041		return -EINVAL;
2042	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2043		return -EINVAL;
2044	/* We can read the guest memory with __xxx_user() later on. */
2045	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2046	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2047	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2048			mem->memory_size))
2049		return -EINVAL;
2050	if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2051	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2052	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2053		return -EINVAL;
2054	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2055		return -EINVAL;
2056	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2057		return -EINVAL;
2058	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2059		return -EINVAL;
2060
2061	slots = __kvm_memslots(kvm, as_id);
2062
2063	/*
2064	 * Note, the old memslot (and the pointer itself!) may be invalidated
2065	 * and/or destroyed by kvm_set_memslot().
2066	 */
2067	old = id_to_memslot(slots, id);
2068
2069	if (!mem->memory_size) {
2070		if (!old || !old->npages)
2071			return -EINVAL;
2072
2073		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2074			return -EIO;
2075
2076		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2077	}
2078
2079	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2080	npages = (mem->memory_size >> PAGE_SHIFT);
2081
2082	if (!old || !old->npages) {
2083		change = KVM_MR_CREATE;
2084
2085		/*
2086		 * To simplify KVM internals, the total number of pages across
2087		 * all memslots must fit in an unsigned long.
2088		 */
2089		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2090			return -EINVAL;
2091	} else { /* Modify an existing slot. */
2092		/* Private memslots are immutable, they can only be deleted. */
2093		if (mem->flags & KVM_MEM_GUEST_MEMFD)
2094			return -EINVAL;
2095		if ((mem->userspace_addr != old->userspace_addr) ||
2096		    (npages != old->npages) ||
2097		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2098			return -EINVAL;
2099
2100		if (base_gfn != old->base_gfn)
2101			change = KVM_MR_MOVE;
2102		else if (mem->flags != old->flags)
2103			change = KVM_MR_FLAGS_ONLY;
2104		else /* Nothing to change. */
2105			return 0;
2106	}
2107
2108	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2109	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2110		return -EEXIST;
2111
2112	/* Allocate a slot that will persist in the memslot. */
2113	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2114	if (!new)
2115		return -ENOMEM;
2116
2117	new->as_id = as_id;
2118	new->id = id;
2119	new->base_gfn = base_gfn;
2120	new->npages = npages;
2121	new->flags = mem->flags;
2122	new->userspace_addr = mem->userspace_addr;
2123	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2124		r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2125		if (r)
2126			goto out;
2127	}
2128
2129	r = kvm_set_memslot(kvm, old, new, change);
2130	if (r)
2131		goto out_unbind;
2132
2133	return 0;
2134
2135out_unbind:
2136	if (mem->flags & KVM_MEM_GUEST_MEMFD)
2137		kvm_gmem_unbind(new);
2138out:
2139	kfree(new);
2140	return r;
2141}
2142EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2143
2144int kvm_set_memory_region(struct kvm *kvm,
2145			  const struct kvm_userspace_memory_region2 *mem)
2146{
2147	int r;
2148
2149	mutex_lock(&kvm->slots_lock);
2150	r = __kvm_set_memory_region(kvm, mem);
2151	mutex_unlock(&kvm->slots_lock);
2152	return r;
2153}
2154EXPORT_SYMBOL_GPL(kvm_set_memory_region);
2155
2156static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2157					  struct kvm_userspace_memory_region2 *mem)
2158{
2159	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2160		return -EINVAL;
2161
2162	return kvm_set_memory_region(kvm, mem);
2163}
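
/*
 * Illustrative userspace-side sketch (not part of the original file, uapi
 * names from <linux/kvm.h>): creating a 2 MiB slot backed by anonymous
 * memory, with dirty logging enabled, looks roughly like:
 *
 *	void *backing = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region2 region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 2 * 1024 * 1024,
 *		.userspace_addr  = (unsigned long)backing,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
 *
 * Deleting the slot later is the same call with .memory_size set to 0.
 */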
2164
2165#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2166/**
2167 * kvm_get_dirty_log - get a snapshot of dirty pages
2168 * @kvm:	pointer to kvm instance
2169 * @log:	slot id and address to which we copy the log
2170 * @is_dirty:	set to '1' if any dirty pages were found
2171 * @memslot:	set to the associated memslot, always valid on success
2172 */
2173int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2174		      int *is_dirty, struct kvm_memory_slot **memslot)
2175{
2176	struct kvm_memslots *slots;
2177	int i, as_id, id;
2178	unsigned long n;
2179	unsigned long any = 0;
2180
2181	/* Dirty ring tracking may be exclusive to dirty log tracking */
2182	if (!kvm_use_dirty_bitmap(kvm))
2183		return -ENXIO;
2184
2185	*memslot = NULL;
2186	*is_dirty = 0;
2187
2188	as_id = log->slot >> 16;
2189	id = (u16)log->slot;
2190	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2191		return -EINVAL;
2192
2193	slots = __kvm_memslots(kvm, as_id);
2194	*memslot = id_to_memslot(slots, id);
2195	if (!(*memslot) || !(*memslot)->dirty_bitmap)
2196		return -ENOENT;
2197
2198	kvm_arch_sync_dirty_log(kvm, *memslot);
2199
2200	n = kvm_dirty_bitmap_bytes(*memslot);
2201
2202	for (i = 0; !any && i < n/sizeof(long); ++i)
2203		any = (*memslot)->dirty_bitmap[i];
2204
2205	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2206		return -EFAULT;
2207
2208	if (any)
2209		*is_dirty = 1;
2210	return 0;
2211}
2212EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2213
2214#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2215/**
2216 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2217 *	and reenable dirty page tracking for the corresponding pages.
2218 * @kvm:	pointer to kvm instance
2219 * @log:	slot id and address to which we copy the log
2220 *
2221 * We need to keep in mind that vCPU threads can write to the bitmap
2222 * concurrently. So, to avoid losing track of dirty pages, we keep the
2223 * following order:
2224 *
2225 *    1. Take a snapshot of the bit and clear it if needed.
2226 *    2. Write protect the corresponding page.
2227 *    3. Copy the snapshot to the userspace.
2228 *    4. Upon return caller flushes TLB's if needed.
2229 *
2230 * Between 2 and 4, the guest may write to the page using the remaining TLB
2231 * entry.  This is not a problem because the page is reported dirty using
2232 * the snapshot taken before and step 4 ensures that writes done after
2233 * exiting to userspace will be logged for the next call.
2234 *
2235 */
2236static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2237{
2238	struct kvm_memslots *slots;
2239	struct kvm_memory_slot *memslot;
2240	int i, as_id, id;
2241	unsigned long n;
2242	unsigned long *dirty_bitmap;
2243	unsigned long *dirty_bitmap_buffer;
2244	bool flush;
2245
2246	/* Dirty ring tracking may be exclusive to dirty log tracking */
2247	if (!kvm_use_dirty_bitmap(kvm))
2248		return -ENXIO;
2249
2250	as_id = log->slot >> 16;
2251	id = (u16)log->slot;
2252	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2253		return -EINVAL;
2254
2255	slots = __kvm_memslots(kvm, as_id);
2256	memslot = id_to_memslot(slots, id);
2257	if (!memslot || !memslot->dirty_bitmap)
2258		return -ENOENT;
2259
2260	dirty_bitmap = memslot->dirty_bitmap;
2261
2262	kvm_arch_sync_dirty_log(kvm, memslot);
2263
2264	n = kvm_dirty_bitmap_bytes(memslot);
2265	flush = false;
2266	if (kvm->manual_dirty_log_protect) {
2267		/*
2268		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2269		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2270		 * is some code duplication between this function and
2271		 * kvm_get_dirty_log, but hopefully all architectures will
2272		 * transition to kvm_get_dirty_log_protect so that kvm_get_dirty_log
2273		 * can be eliminated.
2274		 */
2275		dirty_bitmap_buffer = dirty_bitmap;
2276	} else {
2277		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2278		memset(dirty_bitmap_buffer, 0, n);
2279
2280		KVM_MMU_LOCK(kvm);
2281		for (i = 0; i < n / sizeof(long); i++) {
2282			unsigned long mask;
2283			gfn_t offset;
2284
2285			if (!dirty_bitmap[i])
2286				continue;
2287
2288			flush = true;
2289			mask = xchg(&dirty_bitmap[i], 0);
2290			dirty_bitmap_buffer[i] = mask;
2291
2292			offset = i * BITS_PER_LONG;
2293			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2294								offset, mask);
2295		}
2296		KVM_MMU_UNLOCK(kvm);
2297	}
2298
2299	if (flush)
2300		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2301
2302	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2303		return -EFAULT;
2304	return 0;
2305}
2306
2307
2308/**
2309 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2310 * @kvm: kvm instance
2311 * @log: slot id and address to which we copy the log
2312 *
2313 * Steps 1-4 below provide a general overview of dirty page logging. See
2314 * kvm_get_dirty_log_protect() function description for additional details.
2315 *
2316 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2317 * always flush the TLB (step 4) even if a previous step failed and the dirty
2318 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2319 * API does not preclude subsequent dirty log reads from user space. Flushing
2320 * the TLB ensures writes will be marked dirty for the next log read.
2321 *
2322 *   1. Take a snapshot of the bit and clear it if needed.
2323 *   2. Write protect the corresponding page.
2324 *   3. Copy the snapshot to the userspace.
2325 *   4. Flush TLB's if needed.
2326 */
2327static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2328				      struct kvm_dirty_log *log)
2329{
2330	int r;
2331
2332	mutex_lock(&kvm->slots_lock);
2333
2334	r = kvm_get_dirty_log_protect(kvm, log);
2335
2336	mutex_unlock(&kvm->slots_lock);
2337	return r;
2338}
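
/*
 * Illustrative userspace-side sketch (not part of the original file):
 * harvesting the dirty bitmap for a slot with 'npages' pages looks roughly
 * like this (assuming 64-bit longs):
 *
 *	unsigned long *bitmap = calloc((npages + 63) / 64, sizeof(unsigned long));
 *	struct kvm_dirty_log log = {
 *		.slot         = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * Each set bit identifies a guest page that was written since the previous
 * call (or since dirty logging was enabled for the slot).
 */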
2339
2340/**
2341 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2342 *	and reenable dirty page tracking for the corresponding pages.
2343 * @kvm:	pointer to kvm instance
2344 * @log:	slot id and address from which to fetch the bitmap of dirty pages
2345 */
2346static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2347				       struct kvm_clear_dirty_log *log)
2348{
2349	struct kvm_memslots *slots;
2350	struct kvm_memory_slot *memslot;
2351	int as_id, id;
2352	gfn_t offset;
2353	unsigned long i, n;
2354	unsigned long *dirty_bitmap;
2355	unsigned long *dirty_bitmap_buffer;
2356	bool flush;
2357
2358	/* Dirty ring tracking may be exclusive to dirty log tracking */
2359	if (!kvm_use_dirty_bitmap(kvm))
2360		return -ENXIO;
2361
2362	as_id = log->slot >> 16;
2363	id = (u16)log->slot;
2364	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2365		return -EINVAL;
2366
2367	if (log->first_page & 63)
2368		return -EINVAL;
2369
2370	slots = __kvm_memslots(kvm, as_id);
2371	memslot = id_to_memslot(slots, id);
2372	if (!memslot || !memslot->dirty_bitmap)
2373		return -ENOENT;
2374
2375	dirty_bitmap = memslot->dirty_bitmap;
2376
2377	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2378
2379	if (log->first_page > memslot->npages ||
2380	    log->num_pages > memslot->npages - log->first_page ||
2381	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2382	    return -EINVAL;
2383
2384	kvm_arch_sync_dirty_log(kvm, memslot);
2385
2386	flush = false;
2387	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2388	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2389		return -EFAULT;
2390
2391	KVM_MMU_LOCK(kvm);
2392	for (offset = log->first_page, i = offset / BITS_PER_LONG,
2393		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2394	     i++, offset += BITS_PER_LONG) {
2395		unsigned long mask = *dirty_bitmap_buffer++;
2396		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2397		if (!mask)
2398			continue;
2399
2400		mask &= atomic_long_fetch_andnot(mask, p);
2401
2402		/*
2403		 * mask contains the bits that really have been cleared.  This
2404		 * never includes any bits beyond the length of the memslot (if
2405		 * the length is not aligned to 64 pages), therefore it is not
2406		 * a problem if userspace sets them in log->dirty_bitmap.
2407		*/
2408		if (mask) {
2409			flush = true;
2410			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2411								offset, mask);
2412		}
2413	}
2414	KVM_MMU_UNLOCK(kvm);
2415
2416	if (flush)
2417		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2418
2419	return 0;
2420}
2421
2422static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2423					struct kvm_clear_dirty_log *log)
2424{
2425	int r;
2426
2427	mutex_lock(&kvm->slots_lock);
2428
2429	r = kvm_clear_dirty_log_protect(kvm, log);
2430
2431	mutex_unlock(&kvm->slots_lock);
2432	return r;
2433}
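
/*
 * Illustrative userspace-side sketch (not part of the original file): with
 * manual dirty log protection enabled (KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
 * userspace re-arms write protection for the pages it has processed by
 * handing the harvested bitmap back, roughly:
 *
 *	struct kvm_clear_dirty_log clear = {
 *		.slot         = 0,
 *		.first_page   = 0,		// must be 64-page aligned
 *		.num_pages    = npages,
 *		.dirty_bitmap = bitmap,		// bits to clear and re-protect
 *	};
 *
 *	ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
 */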
2434#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2435
2436#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2437/*
2438 * Returns true if _all_ gfns in the range [@start, @end) have attributes
2439 * matching @attrs.
2440 */
2441bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2442				     unsigned long attrs)
2443{
2444	XA_STATE(xas, &kvm->mem_attr_array, start);
2445	unsigned long index;
2446	bool has_attrs;
2447	void *entry;
2448
2449	rcu_read_lock();
2450
2451	if (!attrs) {
2452		has_attrs = !xas_find(&xas, end - 1);
2453		goto out;
2454	}
2455
2456	has_attrs = true;
2457	for (index = start; index < end; index++) {
2458		do {
2459			entry = xas_next(&xas);
2460		} while (xas_retry(&xas, entry));
2461
2462		if (xas.xa_index != index || xa_to_value(entry) != attrs) {
2463			has_attrs = false;
2464			break;
2465		}
2466	}
2467
2468out:
2469	rcu_read_unlock();
2470	return has_attrs;
2471}
2472
2473static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2474{
2475	if (!kvm || kvm_arch_has_private_mem(kvm))
2476		return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2477
2478	return 0;
2479}
2480
2481static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2482						 struct kvm_mmu_notifier_range *range)
2483{
2484	struct kvm_gfn_range gfn_range;
2485	struct kvm_memory_slot *slot;
2486	struct kvm_memslots *slots;
2487	struct kvm_memslot_iter iter;
2488	bool found_memslot = false;
2489	bool ret = false;
2490	int i;
2491
2492	gfn_range.arg = range->arg;
2493	gfn_range.may_block = range->may_block;
2494
2495	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2496		slots = __kvm_memslots(kvm, i);
2497
2498		kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2499			slot = iter.slot;
2500			gfn_range.slot = slot;
2501
2502			gfn_range.start = max(range->start, slot->base_gfn);
2503			gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2504			if (gfn_range.start >= gfn_range.end)
2505				continue;
2506
2507			if (!found_memslot) {
2508				found_memslot = true;
2509				KVM_MMU_LOCK(kvm);
2510				if (!IS_KVM_NULL_FN(range->on_lock))
2511					range->on_lock(kvm);
2512			}
2513
2514			ret |= range->handler(kvm, &gfn_range);
2515		}
2516	}
2517
2518	if (range->flush_on_ret && ret)
2519		kvm_flush_remote_tlbs(kvm);
2520
2521	if (found_memslot)
2522		KVM_MMU_UNLOCK(kvm);
2523}
2524
2525static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2526					  struct kvm_gfn_range *range)
2527{
2528	/*
2529	 * Unconditionally add the range to the invalidation set, regardless of
2530	 * whether or not the arch callback actually needs to zap SPTEs.  E.g.
2531	 * if KVM supports RWX attributes in the future and the attributes are
2532	 * going from R=>RW, zapping isn't strictly necessary.  Unconditionally
2533	 * adding the range allows KVM to require that MMU invalidations add at
2534	 * least one range between begin() and end(), e.g. allows KVM to detect
2535	 * bugs where the add() is missed.  Relaxing the rule *might* be safe,
2536	 * but it's not obvious that allowing new mappings while the attributes
2537	 * are in flux is desirable or worth the complexity.
2538	 */
2539	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2540
2541	return kvm_arch_pre_set_memory_attributes(kvm, range);
2542}
2543
2544/* Set @attributes for the gfn range [@start, @end). */
2545static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2546				     unsigned long attributes)
2547{
2548	struct kvm_mmu_notifier_range pre_set_range = {
2549		.start = start,
2550		.end = end,
2551		.handler = kvm_pre_set_memory_attributes,
2552		.on_lock = kvm_mmu_invalidate_begin,
2553		.flush_on_ret = true,
2554		.may_block = true,
2555	};
2556	struct kvm_mmu_notifier_range post_set_range = {
2557		.start = start,
2558		.end = end,
2559		.arg.attributes = attributes,
2560		.handler = kvm_arch_post_set_memory_attributes,
2561		.on_lock = kvm_mmu_invalidate_end,
2562		.may_block = true,
2563	};
2564	unsigned long i;
2565	void *entry;
2566	int r = 0;
2567
2568	entry = attributes ? xa_mk_value(attributes) : NULL;
2569
2570	mutex_lock(&kvm->slots_lock);
2571
2572	/* Nothing to do if the entire range has the desired attributes. */
2573	if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
2574		goto out_unlock;
2575
2576	/*
2577	 * Reserve memory ahead of time to avoid having to deal with failures
2578	 * partway through setting the new attributes.
2579	 */
2580	for (i = start; i < end; i++) {
2581		r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2582		if (r)
2583			goto out_unlock;
2584	}
2585
2586	kvm_handle_gfn_range(kvm, &pre_set_range);
2587
2588	for (i = start; i < end; i++) {
2589		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2590				    GFP_KERNEL_ACCOUNT));
2591		KVM_BUG_ON(r, kvm);
2592	}
2593
2594	kvm_handle_gfn_range(kvm, &post_set_range);
2595
2596out_unlock:
2597	mutex_unlock(&kvm->slots_lock);
2598
2599	return r;
2600}
2601static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2602					   struct kvm_memory_attributes *attrs)
2603{
2604	gfn_t start, end;
2605
2606	/* flags is currently not used. */
2607	if (attrs->flags)
2608		return -EINVAL;
2609	if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2610		return -EINVAL;
2611	if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2612		return -EINVAL;
2613	if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2614		return -EINVAL;
2615
2616	start = attrs->address >> PAGE_SHIFT;
2617	end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2618
2619	/*
2620	 * xarray tracks data using "unsigned long", and as a result so does
2621	 * KVM.  For simplicity, KVM supports generic attributes only on 64-bit
2622	 * architectures.
2623	 */
2624	BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2625
2626	return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2627}
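
/*
 * Illustrative userspace-side sketch (not part of the original file):
 * converting a page-aligned guest range to private memory looks roughly like:
 *
 *	struct kvm_memory_attributes attrs = {
 *		.address    = gpa,		// page aligned
 *		.size       = size,		// page aligned, non-zero
 *		.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
 *
 * Passing .attributes = 0 for the same range converts it back to shared.
 */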
2628#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2629
2630struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2631{
2632	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2633}
2634EXPORT_SYMBOL_GPL(gfn_to_memslot);
2635
2636struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2637{
2638	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2639	u64 gen = slots->generation;
2640	struct kvm_memory_slot *slot;
2641
2642	/*
2643	 * This also protects against using a memslot from a different address space,
2644	 * since different address spaces have different generation numbers.
2645	 */
2646	if (unlikely(gen != vcpu->last_used_slot_gen)) {
2647		vcpu->last_used_slot = NULL;
2648		vcpu->last_used_slot_gen = gen;
2649	}
2650
2651	slot = try_get_memslot(vcpu->last_used_slot, gfn);
2652	if (slot)
2653		return slot;
2654
2655	/*
2656	 * Fall back to searching all memslots. We purposely use
2657	 * search_memslots() instead of __gfn_to_memslot() to avoid
2658	 * thrashing the VM-wide last_used_slot in kvm_memslots.
2659	 */
2660	slot = search_memslots(slots, gfn, false);
2661	if (slot) {
2662		vcpu->last_used_slot = slot;
2663		return slot;
2664	}
2665
2666	return NULL;
2667}
2668
2669bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2670{
2671	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2672
2673	return kvm_is_visible_memslot(memslot);
2674}
2675EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2676
2677bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2678{
2679	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2680
2681	return kvm_is_visible_memslot(memslot);
2682}
2683EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2684
2685unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2686{
2687	struct vm_area_struct *vma;
2688	unsigned long addr, size;
2689
2690	size = PAGE_SIZE;
2691
2692	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2693	if (kvm_is_error_hva(addr))
2694		return PAGE_SIZE;
2695
2696	mmap_read_lock(current->mm);
2697	vma = find_vma(current->mm, addr);
2698	if (!vma)
2699		goto out;
2700
2701	size = vma_kernel_pagesize(vma);
2702
2703out:
2704	mmap_read_unlock(current->mm);
2705
2706	return size;
2707}
2708
2709static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2710{
2711	return slot->flags & KVM_MEM_READONLY;
2712}
2713
2714static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2715				       gfn_t *nr_pages, bool write)
2716{
2717	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2718		return KVM_HVA_ERR_BAD;
2719
2720	if (memslot_is_readonly(slot) && write)
2721		return KVM_HVA_ERR_RO_BAD;
2722
2723	if (nr_pages)
2724		*nr_pages = slot->npages - (gfn - slot->base_gfn);
2725
2726	return __gfn_to_hva_memslot(slot, gfn);
2727}
2728
2729static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2730				     gfn_t *nr_pages)
2731{
2732	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2733}
2734
2735unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2736					gfn_t gfn)
2737{
2738	return gfn_to_hva_many(slot, gfn, NULL);
2739}
2740EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2741
2742unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2743{
2744	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2745}
2746EXPORT_SYMBOL_GPL(gfn_to_hva);
2747
2748unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2749{
2750	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
 
2751}
2752EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2753
2754/*
2755 * Return the hva of a @gfn and the R/W attribute if possible.
2756 *
2757 * @slot: the kvm_memory_slot which contains @gfn
2758 * @gfn: the gfn to be translated
2759 * @writable: used to return the read/write attribute of the @slot if the hva
2760 * is valid and @writable is not NULL
2761 */
2762unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2763				      gfn_t gfn, bool *writable)
2764{
2765	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2766
2767	if (!kvm_is_error_hva(hva) && writable)
2768		*writable = !memslot_is_readonly(slot);
2769
2770	return hva;
2771}
2772
2773unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2774{
2775	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2776
2777	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2778}
2779
2780unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2781{
2782	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2783
2784	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2785}
2786
2787static inline int check_user_page_hwpoison(unsigned long addr)
2788{
2789	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2790
2791	rc = get_user_pages(addr, 1, flags, NULL);
 
2792	return rc == -EHWPOISON;
2793}
2794
2795/*
2796 * The fast path to get the writable pfn which will be stored in @pfn;
2797 * true indicates success, otherwise false is returned.  It's also the
2798 * only path that can be taken in atomic context.
2799 */
2800static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2801			    bool *writable, kvm_pfn_t *pfn)
2802{
2803	struct page *page[1];
 
 
2804
2805	/*
2806	 * Fast pin a writable pfn only if it is a write fault request
2807	 * or the caller allows mapping a writable pfn for a read fault
2808	 * request.
2809	 */
2810	if (!(write_fault || writable))
2811		return false;
2812
2813	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2814		*pfn = page_to_pfn(page[0]);
2815
2816		if (writable)
2817			*writable = true;
2818		return true;
2819	}
2820
2821	return false;
2822}
2823
2824/*
2825 * The slow path to get the pfn of the specified host virtual address;
2826 * 1 indicates success, -errno is returned if an error is detected.
2827 */
2828static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2829			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
2830{
2831	/*
2832	 * When a VCPU accesses a page that is not mapped into the secondary
2833	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2834	 * make progress. We always want to honor NUMA hinting faults in that
2835	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2836	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2837	 * mapped into the secondary MMU and gets accessed by a VCPU.
2838	 *
2839	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2840	 * implicitly honor NUMA hinting faults and don't need this flag.
2841	 */
2842	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2843	struct page *page;
2844	int npages;
2845
2846	might_sleep();
 
2847
2848	if (writable)
2849		*writable = write_fault;
2850
2851	if (write_fault)
2852		flags |= FOLL_WRITE;
2853	if (async)
2854		flags |= FOLL_NOWAIT;
2855	if (interruptible)
2856		flags |= FOLL_INTERRUPTIBLE;
2857
2858	npages = get_user_pages_unlocked(addr, 1, &page, flags);
2859	if (npages != 1)
2860		return npages;
2861
2862	/* map read fault as writable if possible */
2863	if (unlikely(!write_fault) && writable) {
2864		struct page *wpage;
2865
2866		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2867			*writable = true;
2868			put_page(page);
2869			page = wpage;
2870		}
2871	}
2872	*pfn = page_to_pfn(page);
2873	return npages;
2874}
2875
2876static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2877{
2878	if (unlikely(!(vma->vm_flags & VM_READ)))
2879		return false;
2880
2881	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2882		return false;
2883
2884	return true;
2885}
2886
2887static int kvm_try_get_pfn(kvm_pfn_t pfn)
2888{
2889	struct page *page = kvm_pfn_to_refcounted_page(pfn);
2890
2891	if (!page)
2892		return 1;
2893
2894	return get_page_unless_zero(page);
2895}
2896
2897static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2898			       unsigned long addr, bool write_fault,
2899			       bool *writable, kvm_pfn_t *p_pfn)
2900{
2901	kvm_pfn_t pfn;
2902	pte_t *ptep;
2903	pte_t pte;
2904	spinlock_t *ptl;
2905	int r;
2906
2907	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2908	if (r) {
2909		/*
2910		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2911		 * not call the fault handler, so do it here.
2912		 */
2913		bool unlocked = false;
2914		r = fixup_user_fault(current->mm, addr,
2915				     (write_fault ? FAULT_FLAG_WRITE : 0),
2916				     &unlocked);
2917		if (unlocked)
2918			return -EAGAIN;
2919		if (r)
2920			return r;
2921
2922		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2923		if (r)
2924			return r;
2925	}
2926
2927	pte = ptep_get(ptep);
2928
2929	if (write_fault && !pte_write(pte)) {
2930		pfn = KVM_PFN_ERR_RO_FAULT;
2931		goto out;
2932	}
2933
2934	if (writable)
2935		*writable = pte_write(pte);
2936	pfn = pte_pfn(pte);
2937
2938	/*
2939	 * Get a reference here because callers of *hva_to_pfn* and
2940	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2941	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
2942	 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2943	 * simply do nothing for reserved pfns.
2944	 *
2945	 * Whoever called remap_pfn_range is also going to call e.g.
2946	 * unmap_mapping_range before the underlying pages are freed,
2947	 * causing a call to our MMU notifier.
2948	 *
2949	 * Certain IO or PFNMAP mappings can be backed with valid
2950	 * struct pages, but be allocated without refcounting e.g.,
2951	 * tail pages of non-compound higher order allocations, which
2952	 * would then underflow the refcount when the caller does the
2953	 * required put_page. Don't allow those pages here.
2954	 */
2955	if (!kvm_try_get_pfn(pfn))
2956		r = -EFAULT;
2957
2958out:
2959	pte_unmap_unlock(ptep, ptl);
2960	*p_pfn = pfn;
2961
2962	return r;
2963}
2964
2965/*
2966 * Pin guest page in memory and return its pfn.
2967 * @addr: host virtual address which maps memory to the guest
2968 * @atomic: whether the call is in atomic context and thus must not sleep
2969 * @interruptible: whether the process can be interrupted by non-fatal signals
2970 * @async: whether this function needs to wait for IO to complete if the
2971 *         host page is not in memory
2972 * @write_fault: whether we should get a writable host page
2973 * @writable: whether mapping a writable host page is allowed for !@write_fault
2974 *
2975 * The function will map a writable host page for these two cases:
2976 * 1): @write_fault = true
2977 * 2): @write_fault = false && @writable, @writable will tell the caller
2978 *     whether the mapping is writable.
2979 */
2980kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2981		     bool *async, bool write_fault, bool *writable)
2982{
2983	struct vm_area_struct *vma;
2984	kvm_pfn_t pfn;
2985	int npages, r;
2986
2987	/* we can do it either atomically or asynchronously, not both */
2988	BUG_ON(atomic && async);
2989
2990	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2991		return pfn;
2992
2993	if (atomic)
2994		return KVM_PFN_ERR_FAULT;
2995
2996	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2997				 writable, &pfn);
2998	if (npages == 1)
2999		return pfn;
3000	if (npages == -EINTR)
3001		return KVM_PFN_ERR_SIGPENDING;
3002
3003	mmap_read_lock(current->mm);
3004	if (npages == -EHWPOISON ||
3005	      (!async && check_user_page_hwpoison(addr))) {
3006		pfn = KVM_PFN_ERR_HWPOISON;
3007		goto exit;
3008	}
3009
3010retry:
3011	vma = vma_lookup(current->mm, addr);
3012
3013	if (vma == NULL)
3014		pfn = KVM_PFN_ERR_FAULT;
3015	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
3016		r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
3017		if (r == -EAGAIN)
3018			goto retry;
3019		if (r < 0)
3020			pfn = KVM_PFN_ERR_FAULT;
3021	} else {
3022		if (async && vma_is_valid(vma, write_fault))
3023			*async = true;
3024		pfn = KVM_PFN_ERR_FAULT;
3025	}
3026exit:
3027	mmap_read_unlock(current->mm);
3028	return pfn;
3029}
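/*
 * Summary of the lookup order in hva_to_pfn() above: hva_to_pfn_fast() is
 * tried first (lockless get_user_page_fast_only(), which only pins a pfn
 * through a writable mapping); if that fails and the caller is not in atomic
 * context, hva_to_pfn_slow() does a full GUP, possibly sleeping and, unless
 * @async was supplied, waiting for IO; finally, for VM_IO/VM_PFNMAP vmas
 * that GUP cannot handle, hva_to_pfn_remapped() walks the page tables
 * directly under the mmap read lock.
 */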
 
3030
3031kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
3032			       bool atomic, bool interruptible, bool *async,
3033			       bool write_fault, bool *writable, hva_t *hva)
3034{
3035	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
3036
3037	if (hva)
3038		*hva = addr;
3039
3040	if (addr == KVM_HVA_ERR_RO_BAD) {
3041		if (writable)
3042			*writable = false;
3043		return KVM_PFN_ERR_RO_FAULT;
3044	}
3045
 
3046	if (kvm_is_error_hva(addr)) {
3047		if (writable)
3048			*writable = false;
3049		return KVM_PFN_NOSLOT;
3050	}
3051
3052	/* Do not map writable pfn in the readonly memslot. */
3053	if (writable && memslot_is_readonly(slot)) {
3054		*writable = false;
3055		writable = NULL;
3056	}
3057
3058	return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
3059			  writable);
3060}
3061EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
3062
3063kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
3064		      bool *writable)
3065{
3066	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
3067				    NULL, write_fault, writable, NULL);
3068}
3069EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
3070
3071kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 
3072{
3073	return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
3074				    NULL, NULL);
3075}
3076EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
3077
3078kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
3079{
3080	return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
3081				    NULL, NULL);
3082}
3083EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
3084
3085kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
 
3086{
3087	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3088}
3089EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
3090
3091kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 
3092{
3093	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
 
3094}
3095EXPORT_SYMBOL_GPL(gfn_to_pfn);
3096
3097kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
3098{
3099	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3100}
3101EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
3102
3103int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3104			    struct page **pages, int nr_pages)
3105{
3106	unsigned long addr;
3107	gfn_t entry = 0;
3108
3109	addr = gfn_to_hva_many(slot, gfn, &entry);
3110	if (kvm_is_error_hva(addr))
3111		return -1;
3112
3113	if (entry < nr_pages)
3114		return 0;
3115
3116	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3117}
3118EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
3119
3120/*
3121 * Do not use this helper unless you are absolutely certain the gfn _must_ be
3122 * backed by 'struct page'.  A valid example is if the backing memslot is
3123 * controlled by KVM.  Note, if the returned page is valid, its refcount has
3124 * been elevated by gfn_to_pfn().
3125 */
3126struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
3127{
3128	struct page *page;
3129	kvm_pfn_t pfn;
3130
3131	pfn = gfn_to_pfn(kvm, gfn);
 
 
3132
3133	if (is_error_noslot_pfn(pfn))
3134		return KVM_ERR_PTR_BAD_PAGE;
3135
3136	page = kvm_pfn_to_refcounted_page(pfn);
3137	if (!page)
3138		return KVM_ERR_PTR_BAD_PAGE;
3139
3140	return page;
3141}
3142EXPORT_SYMBOL_GPL(gfn_to_page);
3143
3144void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
3145{
3146	if (dirty)
3147		kvm_release_pfn_dirty(pfn);
3148	else
3149		kvm_release_pfn_clean(pfn);
3150}
3151
3152int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
3153{
3154	kvm_pfn_t pfn;
3155	void *hva = NULL;
3156	struct page *page = KVM_UNMAPPED_PAGE;
3157
3158	if (!map)
3159		return -EINVAL;
3160
3161	pfn = gfn_to_pfn(vcpu->kvm, gfn);
3162	if (is_error_noslot_pfn(pfn))
3163		return -EINVAL;
3164
3165	if (pfn_valid(pfn)) {
3166		page = pfn_to_page(pfn);
3167		hva = kmap(page);
3168#ifdef CONFIG_HAS_IOMEM
3169	} else {
3170		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
3171#endif
3172	}
3173
3174	if (!hva)
3175		return -EFAULT;
3176
3177	map->page = page;
3178	map->hva = hva;
3179	map->pfn = pfn;
3180	map->gfn = gfn;
3181
3182	return 0;
3183}
3184EXPORT_SYMBOL_GPL(kvm_vcpu_map);
3185
3186void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
3187{
3188	if (!map)
3189		return;
3190
3191	if (!map->hva)
3192		return;
3193
3194	if (map->page != KVM_UNMAPPED_PAGE)
3195		kunmap(map->page);
3196#ifdef CONFIG_HAS_IOMEM
3197	else
3198		memunmap(map->hva);
3199#endif
3200
3201	if (dirty)
3202		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3203
3204	kvm_release_pfn(map->pfn, dirty);
3205
3206	map->hva = NULL;
3207	map->page = NULL;
3208}
3209EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
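/*
 * Typical use of the map/unmap pair above, as a minimal illustrative sketch
 * (error handling trimmed; "gpa", "data" and "len" are hypothetical and the
 * access must stay within one page).  The final "true" passed to
 * kvm_vcpu_unmap() marks the gfn dirty:
 *
 *	struct kvm_host_map map;
 *
 *	if (!kvm_vcpu_map(vcpu, gpa >> PAGE_SHIFT, &map)) {
 *		memcpy(map.hva + offset_in_page(gpa), data, len);
 *		kvm_vcpu_unmap(vcpu, &map, true);
 *	}
 */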
3210
3211static bool kvm_is_ad_tracked_page(struct page *page)
3212{
3213	/*
3214	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
3215	 * touched (e.g. set dirty) except by its owner".
3216	 */
3217	return !PageReserved(page);
3218}
3219
3220static void kvm_set_page_dirty(struct page *page)
3221{
3222	if (kvm_is_ad_tracked_page(page))
3223		SetPageDirty(page);
3224}
3225
3226static void kvm_set_page_accessed(struct page *page)
3227{
3228	if (kvm_is_ad_tracked_page(page))
3229		mark_page_accessed(page);
3230}
3231
3232void kvm_release_page_clean(struct page *page)
3233{
3234	WARN_ON(is_error_page(page));
3235
3236	kvm_set_page_accessed(page);
3237	put_page(page);
3238}
3239EXPORT_SYMBOL_GPL(kvm_release_page_clean);
3240
3241void kvm_release_pfn_clean(kvm_pfn_t pfn)
3242{
3243	struct page *page;
3244
3245	if (is_error_noslot_pfn(pfn))
3246		return;
3247
3248	page = kvm_pfn_to_refcounted_page(pfn);
3249	if (!page)
3250		return;
3251
3252	kvm_release_page_clean(page);
3253}
3254EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
3255
3256void kvm_release_page_dirty(struct page *page)
3257{
3258	WARN_ON(is_error_page(page));
3259
3260	kvm_set_page_dirty(page);
3261	kvm_release_page_clean(page);
3262}
3263EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
3264
3265void kvm_release_pfn_dirty(kvm_pfn_t pfn)
3266{
3267	struct page *page;
3268
3269	if (is_error_noslot_pfn(pfn))
3270		return;
3271
3272	page = kvm_pfn_to_refcounted_page(pfn);
3273	if (!page)
3274		return;
3275
3276	kvm_release_page_dirty(page);
3277}
3278EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
3279
3280/*
3281 * Note, checking for an error/noslot pfn is the caller's responsibility when
3282 * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
3283 * "set" helpers are not to be used when the pfn might point at garbage.
3284 */
3285void kvm_set_pfn_dirty(kvm_pfn_t pfn)
3286{
3287	if (WARN_ON(is_error_noslot_pfn(pfn)))
3288		return;
 
3289
3290	if (pfn_valid(pfn))
3291		kvm_set_page_dirty(pfn_to_page(pfn));
3292}
3293EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
3294
3295void kvm_set_pfn_accessed(kvm_pfn_t pfn)
3296{
3297	if (WARN_ON(is_error_noslot_pfn(pfn)))
3298		return;
 
 
3299
3300	if (pfn_valid(pfn))
3301		kvm_set_page_accessed(pfn_to_page(pfn));
 
 
3302}
3303EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
3304
3305static int next_segment(unsigned long len, int offset)
3306{
3307	if (len > PAGE_SIZE - offset)
3308		return PAGE_SIZE - offset;
3309	else
3310		return len;
3311}
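/*
 * next_segment() bounds each copy to the end of the current guest page.
 * For example (4KiB pages), a transfer of len = 0x300 bytes starting at a
 * gpa whose in-page offset is 0xf00 is split by the loops below into two
 * calls: 0x100 bytes at offset 0xf00 of the first gfn, then 0x200 bytes at
 * offset 0 of the following gfn.
 */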
3312
3313static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3314				 void *data, int offset, int len)
3315{
3316	int r;
3317	unsigned long addr;
3318
3319	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3320	if (kvm_is_error_hva(addr))
3321		return -EFAULT;
3322	r = __copy_from_user(data, (void __user *)addr + offset, len);
3323	if (r)
3324		return -EFAULT;
3325	return 0;
3326}
3327
3328int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3329			int len)
3330{
3331	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3332
3333	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3334}
3335EXPORT_SYMBOL_GPL(kvm_read_guest_page);
3336
3337int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3338			     int offset, int len)
3339{
3340	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3341
3342	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3343}
3344EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
3345
3346int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3347{
3348	gfn_t gfn = gpa >> PAGE_SHIFT;
3349	int seg;
3350	int offset = offset_in_page(gpa);
3351	int ret;
3352
3353	while ((seg = next_segment(len, offset)) != 0) {
3354		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3355		if (ret < 0)
3356			return ret;
3357		offset = 0;
3358		len -= seg;
3359		data += seg;
3360		++gfn;
3361	}
3362	return 0;
3363}
3364EXPORT_SYMBOL_GPL(kvm_read_guest);
3365
3366int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
 
3367{
 
 
3368	gfn_t gfn = gpa >> PAGE_SHIFT;
3369	int seg;
3370	int offset = offset_in_page(gpa);
3371	int ret;
3372
3373	while ((seg = next_segment(len, offset)) != 0) {
3374		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3375		if (ret < 0)
3376			return ret;
3377		offset = 0;
3378		len -= seg;
3379		data += seg;
3380		++gfn;
3381	}
3382	return 0;
3383}
3384EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3385
3386static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3387			           void *data, int offset, unsigned long len)
3388{
3389	int r;
3390	unsigned long addr;
3391
3392	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3393	if (kvm_is_error_hva(addr))
3394		return -EFAULT;
3395	pagefault_disable();
3396	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3397	pagefault_enable();
3398	if (r)
3399		return -EFAULT;
3400	return 0;
3401}
 
3402
3403int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3404			       void *data, unsigned long len)
3405{
3406	gfn_t gfn = gpa >> PAGE_SHIFT;
3407	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3408	int offset = offset_in_page(gpa);
3409
3410	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3411}
3412EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3413
3414static int __kvm_write_guest_page(struct kvm *kvm,
3415				  struct kvm_memory_slot *memslot, gfn_t gfn,
3416			          const void *data, int offset, int len)
3417{
3418	int r;
3419	unsigned long addr;
3420
3421	addr = gfn_to_hva_memslot(memslot, gfn);
3422	if (kvm_is_error_hva(addr))
3423		return -EFAULT;
3424	r = __copy_to_user((void __user *)addr + offset, data, len);
3425	if (r)
3426		return -EFAULT;
3427	mark_page_dirty_in_slot(kvm, memslot, gfn);
3428	return 0;
3429}
3430
3431int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3432			 const void *data, int offset, int len)
3433{
3434	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3435
3436	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3437}
3438EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3439
3440int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3441			      const void *data, int offset, int len)
3442{
3443	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3444
3445	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3446}
3447EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3448
3449int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3450		    unsigned long len)
3451{
3452	gfn_t gfn = gpa >> PAGE_SHIFT;
3453	int seg;
3454	int offset = offset_in_page(gpa);
3455	int ret;
3456
3457	while ((seg = next_segment(len, offset)) != 0) {
3458		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3459		if (ret < 0)
3460			return ret;
3461		offset = 0;
3462		len -= seg;
3463		data += seg;
3464		++gfn;
3465	}
3466	return 0;
3467}
3468EXPORT_SYMBOL_GPL(kvm_write_guest);
3469
3470int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3471		         unsigned long len)
3472{
 
 
3473	gfn_t gfn = gpa >> PAGE_SHIFT;
3474	int seg;
3475	int offset = offset_in_page(gpa);
3476	int ret;
3477
3478	while ((seg = next_segment(len, offset)) != 0) {
3479		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3480		if (ret < 0)
3481			return ret;
3482		offset = 0;
3483		len -= seg;
3484		data += seg;
3485		++gfn;
3486	}
3487	return 0;
3488}
3489EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3490
3491static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3492				       struct gfn_to_hva_cache *ghc,
3493				       gpa_t gpa, unsigned long len)
3494{
3495	int offset = offset_in_page(gpa);
3496	gfn_t start_gfn = gpa >> PAGE_SHIFT;
3497	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3498	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3499	gfn_t nr_pages_avail;
3500
3501	/* Update ghc->generation before performing any error checks. */
3502	ghc->generation = slots->generation;
3503
3504	if (start_gfn > end_gfn) {
3505		ghc->hva = KVM_HVA_ERR_BAD;
3506		return -EINVAL;
3507	}
3508
3509	/*
3510	 * If the requested region crosses two memslots, we still
3511	 * verify that the entire region is valid here.
3512	 */
3513	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3514		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3515		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3516					   &nr_pages_avail);
3517		if (kvm_is_error_hva(ghc->hva))
3518			return -EFAULT;
3519	}
3520
3521	/* Use the slow path for cross page reads and writes. */
3522	if (nr_pages_needed == 1)
3523		ghc->hva += offset;
3524	else
3525		ghc->memslot = NULL;
3526
3527	ghc->gpa = gpa;
3528	ghc->len = len;
3529	return 0;
3530}
3531
3532int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3533			      gpa_t gpa, unsigned long len)
3534{
3535	struct kvm_memslots *slots = kvm_memslots(kvm);
3536	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3537}
3538EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3539
3540int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3541				  void *data, unsigned int offset,
3542				  unsigned long len)
3543{
3544	struct kvm_memslots *slots = kvm_memslots(kvm);
3545	int r;
3546	gpa_t gpa = ghc->gpa + offset;
3547
3548	if (WARN_ON_ONCE(len + offset > ghc->len))
3549		return -EINVAL;
3550
3551	if (slots->generation != ghc->generation) {
3552		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3553			return -EFAULT;
3554	}
3555
3556	if (kvm_is_error_hva(ghc->hva))
3557		return -EFAULT;
3558
3559	if (unlikely(!ghc->memslot))
3560		return kvm_write_guest(kvm, gpa, data, len);
3561
3562	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3563	if (r)
3564		return -EFAULT;
3565	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3566
3567	return 0;
3568}
3569EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3570
3571int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3572			   void *data, unsigned long len)
3573{
3574	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3575}
3576EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3577
3578int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3579				 void *data, unsigned int offset,
3580				 unsigned long len)
3581{
3582	struct kvm_memslots *slots = kvm_memslots(kvm);
3583	int r;
3584	gpa_t gpa = ghc->gpa + offset;
3585
3586	if (WARN_ON_ONCE(len + offset > ghc->len))
3587		return -EINVAL;
3588
3589	if (slots->generation != ghc->generation) {
3590		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3591			return -EFAULT;
3592	}
3593
3594	if (kvm_is_error_hva(ghc->hva))
3595		return -EFAULT;
3596
3597	if (unlikely(!ghc->memslot))
3598		return kvm_read_guest(kvm, gpa, data, len);
3599
3600	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3601	if (r)
3602		return -EFAULT;
3603
3604	return 0;
3605}
3606EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3607
3608int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3609			  void *data, unsigned long len)
3610{
3611	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
 
3612}
3613EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
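/*
 * Minimal illustrative sketch of the cached accessors above ("gpa" and
 * "val" are placeholders): the cache is initialized once and then reused,
 * so the memslot lookup is redone only when slots->generation has changed.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;
 *	kvm_read_guest_cached(kvm, &ghc, &val, sizeof(val));
 *	val++;
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */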
3614
3615int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3616{
3617	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3618	gfn_t gfn = gpa >> PAGE_SHIFT;
3619	int seg;
3620	int offset = offset_in_page(gpa);
3621	int ret;
3622
3623	while ((seg = next_segment(len, offset)) != 0) {
3624		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3625		if (ret < 0)
3626			return ret;
3627		offset = 0;
3628		len -= seg;
3629		++gfn;
3630	}
3631	return 0;
3632}
3633EXPORT_SYMBOL_GPL(kvm_clear_guest);
3634
3635void mark_page_dirty_in_slot(struct kvm *kvm,
3636			     const struct kvm_memory_slot *memslot,
3637		 	     gfn_t gfn)
3638{
3639	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3640
3641#ifdef CONFIG_HAVE_KVM_DIRTY_RING
3642	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3643		return;
3644
3645	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3646#endif
3647
3648	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3649		unsigned long rel_gfn = gfn - memslot->base_gfn;
3650		u32 slot = (memslot->as_id << 16) | memslot->id;
3651
3652		if (kvm->dirty_ring_size && vcpu)
3653			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3654		else if (memslot->dirty_bitmap)
3655			set_bit_le(rel_gfn, memslot->dirty_bitmap);
3656	}
3657}
3658EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
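/*
 * mark_page_dirty_in_slot() serves two dirty-tracking backends: with a
 * dirty ring enabled and a running vCPU, the (as_id << 16 | slot id,
 * offset) pair is pushed onto that vCPU's ring; otherwise the bit for the
 * gfn is set in the memslot's dirty bitmap.  In both cases the gfn is
 * recorded relative to the memslot's base_gfn.
 */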
3659
3660void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3661{
3662	struct kvm_memory_slot *memslot;
3663
3664	memslot = gfn_to_memslot(kvm, gfn);
3665	mark_page_dirty_in_slot(kvm, memslot, gfn);
3666}
3667EXPORT_SYMBOL_GPL(mark_page_dirty);
3668
3669void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3670{
3671	struct kvm_memory_slot *memslot;
3672
3673	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3674	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3675}
3676EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3677
3678void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3679{
3680	if (!vcpu->sigset_active)
3681		return;
3682
3683	/*
3684	 * This does a lockless modification of ->real_blocked, which is fine
3685	 * because only current can change ->real_blocked, and all readers of
3686	 * ->real_blocked don't care as long as ->real_blocked is always a subset
3687	 * of ->blocked.
3688	 */
3689	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3690}
3691
3692void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3693{
3694	if (!vcpu->sigset_active)
3695		return;
3696
3697	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3698	sigemptyset(&current->real_blocked);
3699}
3700
3701static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3702{
3703	unsigned int old, val, grow, grow_start;
3704
3705	old = val = vcpu->halt_poll_ns;
3706	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3707	grow = READ_ONCE(halt_poll_ns_grow);
3708	if (!grow)
3709		goto out;
3710
3711	val *= grow;
3712	if (val < grow_start)
3713		val = grow_start;
3714
3715	vcpu->halt_poll_ns = val;
3716out:
3717	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3718}
3719
3720static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3721{
3722	unsigned int old, val, shrink, grow_start;
3723
3724	old = val = vcpu->halt_poll_ns;
3725	shrink = READ_ONCE(halt_poll_ns_shrink);
3726	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3727	if (shrink == 0)
3728		val = 0;
3729	else
3730		val /= shrink;
3731
3732	if (val < grow_start)
3733		val = 0;
3734
3735	vcpu->halt_poll_ns = val;
3736	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3737}
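/*
 * Example of the grow/shrink behaviour above, assuming hypothetical module
 * parameter values halt_poll_ns_grow = 2 and halt_poll_ns_grow_start =
 * 10000: a vCPU's halt_poll_ns grows 0 -> 10000 -> 20000 -> 40000 -> ...,
 * and with halt_poll_ns_shrink = 0 a single shrink resets it straight back
 * to 0 (a non-zero shrink divides instead, flooring to 0 once the result
 * drops below grow_start).
 */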
3738
3739static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3740{
3741	int ret = -EINTR;
3742	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3743
3744	if (kvm_arch_vcpu_runnable(vcpu))
3745		goto out;
3746	if (kvm_cpu_has_pending_timer(vcpu))
3747		goto out;
3748	if (signal_pending(current))
3749		goto out;
3750	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3751		goto out;
3752
3753	ret = 0;
3754out:
3755	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3756	return ret;
3757}
3758
3759/*
3760 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3761 * pending.  This is mostly used when halting a vCPU, but may also be used
3762 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3763 */
3764bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3765{
3766	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3767	bool waited = false;
3768
3769	vcpu->stat.generic.blocking = 1;
3770
3771	preempt_disable();
3772	kvm_arch_vcpu_blocking(vcpu);
3773	prepare_to_rcuwait(wait);
3774	preempt_enable();
3775
3776	for (;;) {
3777		set_current_state(TASK_INTERRUPTIBLE);
3778
3779		if (kvm_vcpu_check_block(vcpu) < 0)
3780			break;
3781
3782		waited = true;
3783		schedule();
3784	}
3785
3786	preempt_disable();
3787	finish_rcuwait(wait);
3788	kvm_arch_vcpu_unblocking(vcpu);
3789	preempt_enable();
3790
3791	vcpu->stat.generic.blocking = 0;
3792
3793	return waited;
3794}
3795
3796static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3797					  ktime_t end, bool success)
3798{
3799	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3800	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3801
3802	++vcpu->stat.generic.halt_attempted_poll;
3803
3804	if (success) {
3805		++vcpu->stat.generic.halt_successful_poll;
3806
3807		if (!vcpu_valid_wakeup(vcpu))
3808			++vcpu->stat.generic.halt_poll_invalid;
3809
3810		stats->halt_poll_success_ns += poll_ns;
3811		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3812	} else {
3813		stats->halt_poll_fail_ns += poll_ns;
3814		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3815	}
3816}
3817
3818static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3819{
3820	struct kvm *kvm = vcpu->kvm;
3821
3822	if (kvm->override_halt_poll_ns) {
3823		/*
3824		 * Ensure kvm->max_halt_poll_ns is not read before
3825		 * kvm->override_halt_poll_ns.
3826		 *
3827		 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3828		 */
3829		smp_rmb();
3830		return READ_ONCE(kvm->max_halt_poll_ns);
3831	}
3832
3833	return READ_ONCE(halt_poll_ns);
3834}
3835
3836/*
3837 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3838 * polling is enabled, busy wait for a short time before blocking to avoid the
3839 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3840 * is halted.
3841 */
3842void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3843{
3844	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3845	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3846	ktime_t start, cur, poll_end;
3847	bool waited = false;
3848	bool do_halt_poll;
3849	u64 halt_ns;
3850
3851	if (vcpu->halt_poll_ns > max_halt_poll_ns)
3852		vcpu->halt_poll_ns = max_halt_poll_ns;
3853
3854	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3855
3856	start = cur = poll_end = ktime_get();
3857	if (do_halt_poll) {
3858		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3859
3860		do {
3861			if (kvm_vcpu_check_block(vcpu) < 0)
3862				goto out;
3863			cpu_relax();
3864			poll_end = cur = ktime_get();
3865		} while (kvm_vcpu_can_poll(cur, stop));
3866	}
3867
3868	waited = kvm_vcpu_block(vcpu);
3869
3870	cur = ktime_get();
3871	if (waited) {
3872		vcpu->stat.generic.halt_wait_ns +=
3873			ktime_to_ns(cur) - ktime_to_ns(poll_end);
3874		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3875				ktime_to_ns(cur) - ktime_to_ns(poll_end));
3876	}
3877out:
3878	/* The total time the vCPU was "halted", including polling time. */
3879	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3880
3881	/*
3882	 * Note, halt-polling is considered successful so long as the vCPU was
3883	 * never actually scheduled out, i.e. even if the wake event arrived
3884	 * after the end of the halt-polling loop itself, but before the full wait.
3885	 */
3886	if (do_halt_poll)
3887		update_halt_poll_stats(vcpu, start, poll_end, !waited);
3888
3889	if (halt_poll_allowed) {
3890		/* Recompute the max halt poll time in case it changed. */
3891		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3892
3893		if (!vcpu_valid_wakeup(vcpu)) {
3894			shrink_halt_poll_ns(vcpu);
3895		} else if (max_halt_poll_ns) {
3896			if (halt_ns <= vcpu->halt_poll_ns)
3897				;
3898			/* we had a long block, shrink polling */
3899			else if (vcpu->halt_poll_ns &&
3900				 halt_ns > max_halt_poll_ns)
3901				shrink_halt_poll_ns(vcpu);
3902			/* we had a short halt and our poll time is too small */
3903			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3904				 halt_ns < max_halt_poll_ns)
3905				grow_halt_poll_ns(vcpu);
3906		} else {
3907			vcpu->halt_poll_ns = 0;
3908		}
3909	}
3910
3911	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3912}
3913EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3914
3915bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3916{
3917	if (__kvm_vcpu_wake_up(vcpu)) {
3918		WRITE_ONCE(vcpu->ready, true);
3919		++vcpu->stat.generic.halt_wakeup;
3920		return true;
3921	}
3922
3923	return false;
3924}
3925EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3926
3927#ifndef CONFIG_S390
3928/*
3929 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3930 */
3931void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3932{
3933	int me, cpu;
 
 
3934
3935	if (kvm_vcpu_wake_up(vcpu))
3936		return;
 
 
 
3937
3938	me = get_cpu();
3939	/*
3940	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3941	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3942	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3943	 * within the vCPU thread itself.
3944	 */
3945	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3946		if (vcpu->mode == IN_GUEST_MODE)
3947			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3948		goto out;
3949	}
3950
3951	/*
3952	 * Note, the vCPU could get migrated to a different pCPU at any point
3953	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3954	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3955	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3956	 * vCPU also requires it to leave IN_GUEST_MODE.
3957	 */
3958	if (kvm_arch_vcpu_should_kick(vcpu)) {
3959		cpu = READ_ONCE(vcpu->cpu);
3960		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3961			smp_send_reschedule(cpu);
3962	}
3963out:
3964	put_cpu();
3965}
3966EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3967#endif /* !CONFIG_S390 */
3968
3969int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3970{
3971	struct pid *pid;
3972	struct task_struct *task = NULL;
3973	int ret = 0;
3974
3975	rcu_read_lock();
3976	pid = rcu_dereference(target->pid);
3977	if (pid)
3978		task = get_pid_task(pid, PIDTYPE_PID);
3979	rcu_read_unlock();
3980	if (!task)
3981		return ret;
3982	ret = yield_to(task, 1);
3983	put_task_struct(task);
3984
3985	return ret;
3986}
3987EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3988
3989/*
3990 * Helper that checks whether a VCPU is eligible for directed yield.
3991 * The most eligible candidate to yield to is chosen by the following heuristics:
3992 *
3993 *  (a) A VCPU which has not had a pause-loop exit or cpu-relax intercept
3994 *  recently (a preempted lock holder), indicated by @in_spin_loop.
3995 *  Set at the beginning and cleared at the end of the interception/PLE handler.
3996 *
3997 *  (b) A VCPU which had a pause-loop exit/cpu-relax intercept but did not get
3998 *  a chance last time (it has most likely become eligible now, since we
3999 *  probably yielded to the lock holder in the last iteration).  This is done
4000 *  by toggling @dy_eligible each time a VCPU is checked for eligibility.
4001 *
4002 *  Yielding to a recently pl-exited/cpu-relax intercepted VCPU before yielding
4003 *  to a preempted lock holder could result in the wrong VCPU being selected
4004 *  and CPU time being burned.  Giving priority to a potential lock holder
4005 *  increases lock progress.
4006 *
4007 *  Since the algorithm is based on heuristics, accessing another VCPU's data
4008 *  without locking does no harm.  It may merely result in trying to yield to
4009 *  the same VCPU, failing, and moving on to the next VCPU.
4010 */
4011static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
4012{
4013#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
4014	bool eligible;
4015
4016	eligible = !vcpu->spin_loop.in_spin_loop ||
4017		    vcpu->spin_loop.dy_eligible;
4018
4019	if (vcpu->spin_loop.in_spin_loop)
4020		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
4021
4022	return eligible;
4023#else
4024	return true;
4025#endif
4026}
4027
4028/*
4029 * Unlike kvm_arch_vcpu_runnable, this function is called outside
4030 * a vcpu_load/vcpu_put pair.  However, for most architectures
4031 * kvm_arch_vcpu_runnable does not require vcpu_load.
4032 */
4033bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
4034{
4035	return kvm_arch_vcpu_runnable(vcpu);
4036}
4037
4038static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
4039{
4040	if (kvm_arch_dy_runnable(vcpu))
4041		return true;
4042
4043#ifdef CONFIG_KVM_ASYNC_PF
4044	if (!list_empty_careful(&vcpu->async_pf.done))
4045		return true;
4046#endif
4047
4048	return false;
4049}
4050
4051bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
4052{
4053	return false;
4054}
4055
4056void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4057{
4058	struct kvm *kvm = me->kvm;
4059	struct kvm_vcpu *vcpu;
4060	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
4061	unsigned long i;
4062	int yielded = 0;
4063	int try = 3;
4064	int pass;
 
4065
4066	kvm_vcpu_set_in_spin_loop(me, true);
4067	/*
4068	 * We boost the priority of a VCPU that is runnable but not
4069	 * currently running, because it got preempted by something
4070	 * else and called schedule in __vcpu_run.  Hopefully that
4071	 * VCPU is holding the lock that we need and will release it.
4072	 * We approximate round-robin by starting at the last boosted VCPU.
4073	 */
4074	for (pass = 0; pass < 2 && !yielded && try; pass++) {
4075		kvm_for_each_vcpu(i, vcpu, kvm) {
4076			if (!pass && i <= last_boosted_vcpu) {
4077				i = last_boosted_vcpu;
4078				continue;
4079			} else if (pass && i > last_boosted_vcpu)
4080				break;
4081			if (!READ_ONCE(vcpu->ready))
4082				continue;
4083			if (vcpu == me)
4084				continue;
4085			if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4086				continue;
4087			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4088			    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4089			    !kvm_arch_vcpu_in_kernel(vcpu))
4090				continue;
4091			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4092				continue;
4093
4094			yielded = kvm_vcpu_yield_to(vcpu);
4095			if (yielded > 0) {
4096				kvm->last_boosted_vcpu = i;
 
4097				break;
4098			} else if (yielded < 0) {
4099				try--;
4100				if (!try)
4101					break;
4102			}
4103		}
4104	}
4105	kvm_vcpu_set_in_spin_loop(me, false);
4106
4107	/* Ensure vcpu is not eligible during next spinloop */
4108	kvm_vcpu_set_dy_eligible(me, false);
4109}
4110EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
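/*
 * The two-pass scan above approximates round-robin starting just after the
 * previously boosted vCPU.  For example, with four vCPUs and
 * last_boosted_vcpu == 2, pass 0 considers only vCPU 3 and pass 1 considers
 * vCPUs 0, 1 and 2 (skipping "me" and any vCPU that fails the eligibility
 * checks), stopping at the first vCPU that kvm_vcpu_yield_to() succeeds on.
 */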
4111
4112static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4113{
4114#ifdef CONFIG_HAVE_KVM_DIRTY_RING
4115	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4116	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4117	     kvm->dirty_ring_size / PAGE_SIZE);
4118#else
4119	return false;
4120#endif
4121}
4122
4123static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4124{
4125	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4126	struct page *page;
4127
4128	if (vmf->pgoff == 0)
4129		page = virt_to_page(vcpu->run);
4130#ifdef CONFIG_X86
4131	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4132		page = virt_to_page(vcpu->arch.pio_data);
4133#endif
4134#ifdef CONFIG_KVM_MMIO
4135	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4136		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4137#endif
4138	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4139		page = kvm_dirty_ring_get_page(
4140		    &vcpu->dirty_ring,
4141		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4142	else
4143		return kvm_arch_vcpu_fault(vcpu, vmf);
4144	get_page(page);
4145	vmf->page = page;
4146	return 0;
4147}
4148
4149static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4150	.fault = kvm_vcpu_fault,
4151};
4152
4153static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4154{
4155	struct kvm_vcpu *vcpu = file->private_data;
4156	unsigned long pages = vma_pages(vma);
4157
4158	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4159	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4160	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4161		return -EINVAL;
4162
4163	vma->vm_ops = &kvm_vcpu_vm_ops;
4164	return 0;
4165}
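/*
 * Layout of the vCPU fd mmap region faulted in above: page 0 is the shared
 * kvm_run structure, KVM_PIO_PAGE_OFFSET is the x86 PIO data page,
 * KVM_COALESCED_MMIO_PAGE_OFFSET is the coalesced MMIO ring, and pages from
 * KVM_DIRTY_LOG_PAGE_OFFSET onwards back the per-vCPU dirty ring, which
 * kvm_vcpu_mmap() requires to be mapped shared and non-executable.
 */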
4166
4167static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4168{
4169	struct kvm_vcpu *vcpu = filp->private_data;
4170
4171	kvm_put_kvm(vcpu->kvm);
4172	return 0;
4173}
4174
4175static struct file_operations kvm_vcpu_fops = {
4176	.release        = kvm_vcpu_release,
4177	.unlocked_ioctl = kvm_vcpu_ioctl,
 
 
 
4178	.mmap           = kvm_vcpu_mmap,
4179	.llseek		= noop_llseek,
4180	KVM_COMPAT(kvm_vcpu_compat_ioctl),
4181};
4182
4183/*
4184 * Allocates an inode for the vcpu.
4185 */
4186static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4187{
4188	char name[8 + 1 + ITOA_MAX_LEN + 1];
4189
4190	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4191	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4192}
4193
4194#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4195static int vcpu_get_pid(void *data, u64 *val)
4196{
4197	struct kvm_vcpu *vcpu = data;
4198
4199	rcu_read_lock();
4200	*val = pid_nr(rcu_dereference(vcpu->pid));
4201	rcu_read_unlock();
4202	return 0;
4203}
4204
4205DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4206
4207static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4208{
4209	struct dentry *debugfs_dentry;
4210	char dir_name[ITOA_MAX_LEN * 2];
4211
4212	if (!debugfs_initialized())
4213		return;
4214
4215	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4216	debugfs_dentry = debugfs_create_dir(dir_name,
4217					    vcpu->kvm->debugfs_dentry);
4218	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4219			    &vcpu_get_pid_fops);
4220
4221	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4222}
4223#endif
4224
4225/*
4226 * Creates some virtual cpus.  Good luck creating more than one.
4227 */
4228static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
4229{
4230	int r;
4231	struct kvm_vcpu *vcpu;
4232	struct page *page;
4233
4234	if (id >= KVM_MAX_VCPU_IDS)
4235		return -EINVAL;
 
4236
4237	mutex_lock(&kvm->lock);
4238	if (kvm->created_vcpus >= kvm->max_vcpus) {
4239		mutex_unlock(&kvm->lock);
4240		return -EINVAL;
4241	}
4242
4243	r = kvm_arch_vcpu_precreate(kvm, id);
4244	if (r) {
4245		mutex_unlock(&kvm->lock);
4246		return r;
4247	}
4248
4249	kvm->created_vcpus++;
4250	mutex_unlock(&kvm->lock);
4251
4252	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4253	if (!vcpu) {
4254		r = -ENOMEM;
4255		goto vcpu_decrement;
4256	}
4257
4258	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4259	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4260	if (!page) {
4261		r = -ENOMEM;
4262		goto vcpu_free;
4263	}
4264	vcpu->run = page_address(page);
4265
4266	kvm_vcpu_init(vcpu, kvm, id);
4267
4268	r = kvm_arch_vcpu_create(vcpu);
4269	if (r)
4270		goto vcpu_free_run_page;
4271
4272	if (kvm->dirty_ring_size) {
4273		r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
4274					 id, kvm->dirty_ring_size);
4275		if (r)
4276			goto arch_vcpu_destroy;
4277	}
4278
4279	mutex_lock(&kvm->lock);
4280
4281#ifdef CONFIG_LOCKDEP
4282	/* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
4283	mutex_lock(&vcpu->mutex);
4284	mutex_unlock(&vcpu->mutex);
4285#endif
4286
4287	if (kvm_get_vcpu_by_id(kvm, id)) {
4288		r = -EEXIST;
4289		goto unlock_vcpu_destroy;
4290	}
4291
4292	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4293	r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4294	if (r)
4295		goto unlock_vcpu_destroy;
 
 
 
4296
4297	/* Now it's all set up, let userspace reach it */
4298	kvm_get_kvm(kvm);
4299	r = create_vcpu_fd(vcpu);
4300	if (r < 0)
4301		goto kvm_put_xa_release;
4302
4303	if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4304		r = -EINVAL;
4305		goto kvm_put_xa_release;
4306	}
4307
4308	/*
4309	 * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
4310	 * pointer before kvm->online_vcpus' incremented value.
4311	 */
4312	smp_wmb();
4313	atomic_inc(&kvm->online_vcpus);
4314
4315	mutex_unlock(&kvm->lock);
4316	kvm_arch_vcpu_postcreate(vcpu);
4317	kvm_create_vcpu_debugfs(vcpu);
4318	return r;
4319
4320kvm_put_xa_release:
4321	kvm_put_kvm_no_destroy(kvm);
4322	xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4323unlock_vcpu_destroy:
4324	mutex_unlock(&kvm->lock);
4325	kvm_dirty_ring_free(&vcpu->dirty_ring);
4326arch_vcpu_destroy:
4327	kvm_arch_vcpu_destroy(vcpu);
4328vcpu_free_run_page:
4329	free_page((unsigned long)vcpu->run);
4330vcpu_free:
4331	kmem_cache_free(kvm_vcpu_cache, vcpu);
4332vcpu_decrement:
4333	mutex_lock(&kvm->lock);
4334	kvm->created_vcpus--;
4335	mutex_unlock(&kvm->lock);
4336	return r;
4337}
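/*
 * Note the two counters used above: kvm->created_vcpus is bumped under
 * kvm->lock before the potentially slow allocations so that concurrent
 * creators honor max_vcpus, while kvm->online_vcpus is incremented only
 * after the vCPU has been stored in vcpu_array, with the smp_wmb() pairing
 * against the smp_rmb() in kvm_get_vcpu().
 */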
4338
4339static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4340{
4341	if (sigset) {
4342		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4343		vcpu->sigset_active = 1;
4344		vcpu->sigset = *sigset;
4345	} else
4346		vcpu->sigset_active = 0;
4347	return 0;
4348}
4349
4350static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4351			      size_t size, loff_t *offset)
4352{
4353	struct kvm_vcpu *vcpu = file->private_data;
4354
4355	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4356			&kvm_vcpu_stats_desc[0], &vcpu->stat,
4357			sizeof(vcpu->stat), user_buffer, size, offset);
4358}
4359
4360static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4361{
4362	struct kvm_vcpu *vcpu = file->private_data;
4363
4364	kvm_put_kvm(vcpu->kvm);
4365	return 0;
4366}
4367
4368static const struct file_operations kvm_vcpu_stats_fops = {
4369	.owner = THIS_MODULE,
4370	.read = kvm_vcpu_stats_read,
4371	.release = kvm_vcpu_stats_release,
4372	.llseek = noop_llseek,
4373};
4374
4375static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4376{
4377	int fd;
4378	struct file *file;
4379	char name[15 + ITOA_MAX_LEN + 1];
4380
4381	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4382
4383	fd = get_unused_fd_flags(O_CLOEXEC);
4384	if (fd < 0)
4385		return fd;
4386
4387	file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4388	if (IS_ERR(file)) {
4389		put_unused_fd(fd);
4390		return PTR_ERR(file);
4391	}
4392
4393	kvm_get_kvm(vcpu->kvm);
4394
4395	file->f_mode |= FMODE_PREAD;
4396	fd_install(fd, file);
4397
4398	return fd;
4399}
4400
4401static long kvm_vcpu_ioctl(struct file *filp,
4402			   unsigned int ioctl, unsigned long arg)
4403{
4404	struct kvm_vcpu *vcpu = filp->private_data;
4405	void __user *argp = (void __user *)arg;
4406	int r;
4407	struct kvm_fpu *fpu = NULL;
4408	struct kvm_sregs *kvm_sregs = NULL;
4409
4410	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4411		return -EIO;
4412
4413	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4414		return -EINVAL;
4415
4416	/*
4417	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4418	 * execution; mutex_lock() would break them.
4419	 */
4420	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4421	if (r != -ENOIOCTLCMD)
4422		return r;
 
4423
4424	if (mutex_lock_killable(&vcpu->mutex))
4425		return -EINTR;
4426	switch (ioctl) {
4427	case KVM_RUN: {
4428		struct pid *oldpid;
4429		r = -EINVAL;
4430		if (arg)
4431			goto out;
4432		oldpid = rcu_access_pointer(vcpu->pid);
4433		if (unlikely(oldpid != task_pid(current))) {
4434			/* The thread running this VCPU changed. */
4435			struct pid *newpid;
4436
4437			r = kvm_arch_vcpu_run_pid_change(vcpu);
4438			if (r)
4439				break;
4440
4441			newpid = get_task_pid(current, PIDTYPE_PID);
4442			rcu_assign_pointer(vcpu->pid, newpid);
4443			if (oldpid)
4444				synchronize_rcu();
4445			put_pid(oldpid);
4446		}
4447		r = kvm_arch_vcpu_ioctl_run(vcpu);
4448		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4449		break;
4450	}
4451	case KVM_GET_REGS: {
4452		struct kvm_regs *kvm_regs;
4453
4454		r = -ENOMEM;
4455		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
4456		if (!kvm_regs)
4457			goto out;
4458		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4459		if (r)
4460			goto out_free1;
4461		r = -EFAULT;
4462		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4463			goto out_free1;
4464		r = 0;
4465out_free1:
4466		kfree(kvm_regs);
4467		break;
4468	}
4469	case KVM_SET_REGS: {
4470		struct kvm_regs *kvm_regs;
4471
 
4472		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4473		if (IS_ERR(kvm_regs)) {
4474			r = PTR_ERR(kvm_regs);
4475			goto out;
4476		}
4477		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4478		kfree(kvm_regs);
4479		break;
4480	}
4481	case KVM_GET_SREGS: {
4482		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
4483				    GFP_KERNEL_ACCOUNT);
4484		r = -ENOMEM;
4485		if (!kvm_sregs)
4486			goto out;
4487		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4488		if (r)
4489			goto out;
4490		r = -EFAULT;
4491		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4492			goto out;
4493		r = 0;
4494		break;
4495	}
4496	case KVM_SET_SREGS: {
4497		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4498		if (IS_ERR(kvm_sregs)) {
4499			r = PTR_ERR(kvm_sregs);
4500			kvm_sregs = NULL;
4501			goto out;
4502		}
4503		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
 
 
 
4504		break;
4505	}
4506	case KVM_GET_MP_STATE: {
4507		struct kvm_mp_state mp_state;
4508
4509		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4510		if (r)
4511			goto out;
4512		r = -EFAULT;
4513		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4514			goto out;
4515		r = 0;
4516		break;
4517	}
4518	case KVM_SET_MP_STATE: {
4519		struct kvm_mp_state mp_state;
4520
4521		r = -EFAULT;
4522		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4523			goto out;
4524		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
 
 
 
4525		break;
4526	}
4527	case KVM_TRANSLATE: {
4528		struct kvm_translation tr;
4529
4530		r = -EFAULT;
4531		if (copy_from_user(&tr, argp, sizeof(tr)))
4532			goto out;
4533		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4534		if (r)
4535			goto out;
4536		r = -EFAULT;
4537		if (copy_to_user(argp, &tr, sizeof(tr)))
4538			goto out;
4539		r = 0;
4540		break;
4541	}
4542	case KVM_SET_GUEST_DEBUG: {
4543		struct kvm_guest_debug dbg;
4544
4545		r = -EFAULT;
4546		if (copy_from_user(&dbg, argp, sizeof(dbg)))
4547			goto out;
4548		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
 
 
 
4549		break;
4550	}
4551	case KVM_SET_SIGNAL_MASK: {
4552		struct kvm_signal_mask __user *sigmask_arg = argp;
4553		struct kvm_signal_mask kvm_sigmask;
4554		sigset_t sigset, *p;
4555
4556		p = NULL;
4557		if (argp) {
4558			r = -EFAULT;
4559			if (copy_from_user(&kvm_sigmask, argp,
4560					   sizeof(kvm_sigmask)))
4561				goto out;
4562			r = -EINVAL;
4563			if (kvm_sigmask.len != sizeof(sigset))
4564				goto out;
4565			r = -EFAULT;
4566			if (copy_from_user(&sigset, sigmask_arg->sigset,
4567					   sizeof(sigset)))
4568				goto out;
4569			p = &sigset;
4570		}
4571		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4572		break;
4573	}
4574	case KVM_GET_FPU: {
4575		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4576		r = -ENOMEM;
4577		if (!fpu)
4578			goto out;
4579		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4580		if (r)
4581			goto out;
4582		r = -EFAULT;
4583		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4584			goto out;
4585		r = 0;
4586		break;
4587	}
4588	case KVM_SET_FPU: {
4589		fpu = memdup_user(argp, sizeof(*fpu));
4590		if (IS_ERR(fpu)) {
4591			r = PTR_ERR(fpu);
4592			fpu = NULL;
4593			goto out;
4594		}
4595		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4596		break;
4597	}
4598	case KVM_GET_STATS_FD: {
4599		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4600		break;
4601	}
4602	default:
4603		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4604	}
4605out:
4606	mutex_unlock(&vcpu->mutex);
4607	kfree(fpu);
4608	kfree(kvm_sregs);
4609	return r;
4610}
4611
4612#ifdef CONFIG_KVM_COMPAT
4613static long kvm_vcpu_compat_ioctl(struct file *filp,
4614				  unsigned int ioctl, unsigned long arg)
4615{
4616	struct kvm_vcpu *vcpu = filp->private_data;
4617	void __user *argp = compat_ptr(arg);
4618	int r;
4619
4620	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4621		return -EIO;
4622
4623	switch (ioctl) {
4624	case KVM_SET_SIGNAL_MASK: {
4625		struct kvm_signal_mask __user *sigmask_arg = argp;
4626		struct kvm_signal_mask kvm_sigmask;
 
4627		sigset_t sigset;
4628
4629		if (argp) {
4630			r = -EFAULT;
4631			if (copy_from_user(&kvm_sigmask, argp,
4632					   sizeof(kvm_sigmask)))
4633				goto out;
4634			r = -EINVAL;
4635			if (kvm_sigmask.len != sizeof(compat_sigset_t))
4636				goto out;
4637			r = -EFAULT;
4638			if (get_compat_sigset(&sigset,
4639					      (compat_sigset_t __user *)sigmask_arg->sigset))
4640				goto out;
4641			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4642		} else
4643			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4644		break;
4645	}
4646	default:
4647		r = kvm_vcpu_ioctl(filp, ioctl, arg);
4648	}
4649
4650out:
4651	return r;
4652}
4653#endif
4654
4655static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4656{
4657	struct kvm_device *dev = filp->private_data;
4658
4659	if (dev->ops->mmap)
4660		return dev->ops->mmap(dev, vma);
4661
4662	return -ENODEV;
4663}
4664
4665static int kvm_device_ioctl_attr(struct kvm_device *dev,
4666				 int (*accessor)(struct kvm_device *dev,
4667						 struct kvm_device_attr *attr),
4668				 unsigned long arg)
4669{
4670	struct kvm_device_attr attr;
4671
4672	if (!accessor)
4673		return -EPERM;
4674
4675	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4676		return -EFAULT;
4677
4678	return accessor(dev, &attr);
4679}
4680
4681static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4682			     unsigned long arg)
4683{
4684	struct kvm_device *dev = filp->private_data;
4685
4686	if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4687		return -EIO;
4688
4689	switch (ioctl) {
4690	case KVM_SET_DEVICE_ATTR:
4691		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4692	case KVM_GET_DEVICE_ATTR:
4693		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4694	case KVM_HAS_DEVICE_ATTR:
4695		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4696	default:
4697		if (dev->ops->ioctl)
4698			return dev->ops->ioctl(dev, ioctl, arg);
4699
4700		return -ENOTTY;
4701	}
4702}
4703
4704static int kvm_device_release(struct inode *inode, struct file *filp)
4705{
4706	struct kvm_device *dev = filp->private_data;
4707	struct kvm *kvm = dev->kvm;
4708
4709	if (dev->ops->release) {
4710		mutex_lock(&kvm->lock);
4711		list_del(&dev->vm_node);
4712		dev->ops->release(dev);
4713		mutex_unlock(&kvm->lock);
4714	}
4715
4716	kvm_put_kvm(kvm);
4717	return 0;
4718}
4719
4720static struct file_operations kvm_device_fops = {
4721	.unlocked_ioctl = kvm_device_ioctl,
4722	.release = kvm_device_release,
4723	KVM_COMPAT(kvm_device_ioctl),
4724	.mmap = kvm_device_mmap,
4725};
4726
4727struct kvm_device *kvm_device_from_filp(struct file *filp)
4728{
4729	if (filp->f_op != &kvm_device_fops)
4730		return NULL;
4731
4732	return filp->private_data;
4733}
4734
4735static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4736#ifdef CONFIG_KVM_MPIC
4737	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
4738	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
4739#endif
4740};
4741
4742int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4743{
4744	if (type >= ARRAY_SIZE(kvm_device_ops_table))
4745		return -ENOSPC;
4746
4747	if (kvm_device_ops_table[type] != NULL)
4748		return -EEXIST;
4749
4750	kvm_device_ops_table[type] = ops;
4751	return 0;
4752}
4753
4754void kvm_unregister_device_ops(u32 type)
4755{
4756	if (kvm_device_ops_table[type] != NULL)
4757		kvm_device_ops_table[type] = NULL;
4758}
4759
4760static int kvm_ioctl_create_device(struct kvm *kvm,
4761				   struct kvm_create_device *cd)
4762{
4763	const struct kvm_device_ops *ops;
4764	struct kvm_device *dev;
4765	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4766	int type;
4767	int ret;
4768
4769	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4770		return -ENODEV;
4771
4772	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4773	ops = kvm_device_ops_table[type];
4774	if (ops == NULL)
4775		return -ENODEV;
4776
4777	if (test)
4778		return 0;
4779
4780	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4781	if (!dev)
4782		return -ENOMEM;
4783
4784	dev->ops = ops;
4785	dev->kvm = kvm;
4786
4787	mutex_lock(&kvm->lock);
4788	ret = ops->create(dev, type);
4789	if (ret < 0) {
4790		mutex_unlock(&kvm->lock);
4791		kfree(dev);
4792		return ret;
4793	}
4794	list_add(&dev->vm_node, &kvm->devices);
4795	mutex_unlock(&kvm->lock);
4796
4797	if (ops->init)
4798		ops->init(dev);
4799
4800	kvm_get_kvm(kvm);
4801	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4802	if (ret < 0) {
4803		kvm_put_kvm_no_destroy(kvm);
4804		mutex_lock(&kvm->lock);
4805		list_del(&dev->vm_node);
4806		if (ops->release)
4807			ops->release(dev);
4808		mutex_unlock(&kvm->lock);
4809		if (ops->destroy)
4810			ops->destroy(dev);
4811		return ret;
4812	}
4813
4814	cd->fd = ret;
4815	return 0;
4816}
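/*
 * Illustrative userspace sketch (not part of this file): probing for and
 * then creating an in-kernel device.  KVM_CREATE_DEVICE_TEST only checks
 * that the device type is supported; a second call without the flag creates
 * the device and returns its file descriptor in cd.fd.  KVM_DEV_TYPE_VFIO is
 * just one example type, and vm_fd/device_fd are hypothetical.
 *
 *	struct kvm_create_device cd = {
 *		.type  = KVM_DEV_TYPE_VFIO,
 *		.flags = KVM_CREATE_DEVICE_TEST,
 *	};
 *
 *	if (!ioctl(vm_fd, KVM_CREATE_DEVICE, &cd)) {
 *		cd.flags = 0;
 *		if (!ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *			device_fd = cd.fd;
 *	}
 */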
4817
4818static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4819{
4820	switch (arg) {
4821	case KVM_CAP_USER_MEMORY:
4822	case KVM_CAP_USER_MEMORY2:
4823	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4824	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4825	case KVM_CAP_INTERNAL_ERROR_DATA:
4826#ifdef CONFIG_HAVE_KVM_MSI
4827	case KVM_CAP_SIGNAL_MSI:
4828#endif
4829#ifdef CONFIG_HAVE_KVM_IRQCHIP
4830	case KVM_CAP_IRQFD:
4831#endif
4832	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4833	case KVM_CAP_CHECK_EXTENSION_VM:
4834	case KVM_CAP_ENABLE_CAP_VM:
4835	case KVM_CAP_HALT_POLL:
4836		return 1;
4837#ifdef CONFIG_KVM_MMIO
4838	case KVM_CAP_COALESCED_MMIO:
4839		return KVM_COALESCED_MMIO_PAGE_OFFSET;
4840	case KVM_CAP_COALESCED_PIO:
4841		return 1;
4842#endif
4843#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4844	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4845		return KVM_DIRTY_LOG_MANUAL_CAPS;
4846#endif
4847#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4848	case KVM_CAP_IRQ_ROUTING:
4849		return KVM_MAX_IRQ_ROUTES;
4850#endif
4851#if KVM_MAX_NR_ADDRESS_SPACES > 1
4852	case KVM_CAP_MULTI_ADDRESS_SPACE:
4853		if (kvm)
4854			return kvm_arch_nr_memslot_as_ids(kvm);
4855		return KVM_MAX_NR_ADDRESS_SPACES;
4856#endif
4857	case KVM_CAP_NR_MEMSLOTS:
4858		return KVM_USER_MEM_SLOTS;
4859	case KVM_CAP_DIRTY_LOG_RING:
4860#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4861		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4862#else
4863		return 0;
4864#endif
4865	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4866#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4867		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4868#else
4869		return 0;
4870#endif
4871#ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4872	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4873#endif
4874	case KVM_CAP_BINARY_STATS_FD:
4875	case KVM_CAP_SYSTEM_EVENT_DATA:
4876	case KVM_CAP_DEVICE_CTRL:
4877		return 1;
4878#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4879	case KVM_CAP_MEMORY_ATTRIBUTES:
4880		return kvm_supported_mem_attributes(kvm);
4881#endif
4882#ifdef CONFIG_KVM_PRIVATE_MEM
4883	case KVM_CAP_GUEST_MEMFD:
4884		return !kvm || kvm_arch_has_private_mem(kvm);
4885#endif
4886	default:
4887		break;
4888	}
4889	return kvm_vm_ioctl_check_extension(kvm, arg);
4890}
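/*
 * Illustrative userspace sketch (not part of this file): querying a
 * capability.  A return value of 0 means "not supported"; a positive value
 * either means "supported" or carries capability-specific information
 * (e.g. KVM_CAP_NR_MEMSLOTS returns the slot count).  Since
 * KVM_CAP_CHECK_EXTENSION_VM is advertised, the same ioctl can also be
 * issued on a VM fd; kvm_fd is assumed to be an open /dev/kvm descriptor.
 *
 *	int supported = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
 */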
4891
4892static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4893{
4894	int r;
4895
4896	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4897		return -EINVAL;
4898
4899	/* the size should be a power of 2 */
4900	if (!size || (size & (size - 1)))
4901		return -EINVAL;
4902
4903	/* Should be big enough to hold the reserved entries, and at least a page */
4904	if (size < kvm_dirty_ring_get_rsvd_entries() *
4905	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4906		return -EINVAL;
4907
4908	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4909	    sizeof(struct kvm_dirty_gfn))
4910		return -E2BIG;
4911
4912	/* We only allow it to be set once */
4913	if (kvm->dirty_ring_size)
4914		return -EINVAL;
4915
4916	mutex_lock(&kvm->lock);
4917
4918	if (kvm->created_vcpus) {
4919		/* We don't allow changing this value after vCPUs are created */
4920		r = -EINVAL;
4921	} else {
4922		kvm->dirty_ring_size = size;
4923		r = 0;
4924	}
4925
4926	mutex_unlock(&kvm->lock);
4927	return r;
4928}
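/*
 * Illustrative userspace sketch (not part of this file): enabling the dirty
 * ring on a VM before any vCPUs are created.  args[0] is a size in bytes
 * that must be a power of two, at least a page, and no larger than
 * KVM_DIRTY_RING_MAX_ENTRIES entries; 64 KiB (4096 entries of 16 bytes) is
 * an arbitrary example, and the ACQ_REL flavor is used assuming the host
 * advertises it.  vm_fd is hypothetical.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_DIRTY_LOG_RING_ACQ_REL,
 *		.args = { 65536 },
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */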
4929
4930static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4931{
4932	unsigned long i;
4933	struct kvm_vcpu *vcpu;
4934	int cleared = 0;
4935
4936	if (!kvm->dirty_ring_size)
4937		return -EINVAL;
4938
4939	mutex_lock(&kvm->slots_lock);
4940
4941	kvm_for_each_vcpu(i, vcpu, kvm)
4942		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4943
4944	mutex_unlock(&kvm->slots_lock);
4945
4946	if (cleared)
4947		kvm_flush_remote_tlbs(kvm);
4948
4949	return cleared;
4950}
4951
4952int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4953						  struct kvm_enable_cap *cap)
4954{
4955	return -EINVAL;
4956}
4957
4958bool kvm_are_all_memslots_empty(struct kvm *kvm)
4959{
4960	int i;
4961
4962	lockdep_assert_held(&kvm->slots_lock);
4963
4964	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
4965		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4966			return false;
4967	}
4968
4969	return true;
4970}
4971EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
4972
4973static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4974					   struct kvm_enable_cap *cap)
4975{
4976	switch (cap->cap) {
4977#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4978	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4979		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4980
4981		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4982			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4983
4984		if (cap->flags || (cap->args[0] & ~allowed_options))
4985			return -EINVAL;
4986		kvm->manual_dirty_log_protect = cap->args[0];
4987		return 0;
4988	}
4989#endif
4990	case KVM_CAP_HALT_POLL: {
4991		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4992			return -EINVAL;
4993
4994		kvm->max_halt_poll_ns = cap->args[0];
4995
4996		/*
4997		 * Ensure kvm->override_halt_poll_ns does not become visible
4998		 * before kvm->max_halt_poll_ns.
4999		 *
5000		 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5001		 */
5002		smp_wmb();
5003		kvm->override_halt_poll_ns = true;
5004
5005		return 0;
5006	}
5007	case KVM_CAP_DIRTY_LOG_RING:
5008	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5009		if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5010			return -EINVAL;
5011
5012		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5013	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5014		int r = -EINVAL;
5015
5016		if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5017		    !kvm->dirty_ring_size || cap->flags)
5018			return r;
5019
5020		mutex_lock(&kvm->slots_lock);
5021
5022		/*
5023		 * For simplicity, allow enabling ring+bitmap if and only if
5024		 * there are no memslots, e.g. to ensure all memslots allocate
5025		 * a bitmap after the capability is enabled.
5026		 */
5027		if (kvm_are_all_memslots_empty(kvm)) {
5028			kvm->dirty_ring_with_bitmap = true;
5029			r = 0;
5030		}
5031
5032		mutex_unlock(&kvm->slots_lock);
5033
5034		return r;
5035	}
5036	default:
5037		return kvm_vm_ioctl_enable_cap(kvm, cap);
5038	}
5039}
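/*
 * Illustrative userspace sketch (not part of this file): overriding the
 * per-VM halt-polling limit via KVM_CAP_HALT_POLL.  args[0] is in
 * nanoseconds and must fit in an unsigned int; 200000 (200 us) is an
 * arbitrary example value, and vm_fd is hypothetical.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_HALT_POLL,
 *		.args = { 200000 },
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */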
5040
5041static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5042			      size_t size, loff_t *offset)
5043{
5044	struct kvm *kvm = file->private_data;
5045
5046	return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5047				&kvm_vm_stats_desc[0], &kvm->stat,
5048				sizeof(kvm->stat), user_buffer, size, offset);
5049}
5050
5051static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5052{
5053	struct kvm *kvm = file->private_data;
5054
5055	kvm_put_kvm(kvm);
5056	return 0;
5057}
5058
5059static const struct file_operations kvm_vm_stats_fops = {
5060	.owner = THIS_MODULE,
5061	.read = kvm_vm_stats_read,
5062	.release = kvm_vm_stats_release,
5063	.llseek = noop_llseek,
5064};
5065
5066static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5067{
5068	int fd;
5069	struct file *file;
5070
5071	fd = get_unused_fd_flags(O_CLOEXEC);
5072	if (fd < 0)
5073		return fd;
5074
5075	file = anon_inode_getfile("kvm-vm-stats",
5076			&kvm_vm_stats_fops, kvm, O_RDONLY);
5077	if (IS_ERR(file)) {
5078		put_unused_fd(fd);
5079		return PTR_ERR(file);
5080	}
5081
5082	kvm_get_kvm(kvm);
5083
5084	file->f_mode |= FMODE_PREAD;
5085	fd_install(fd, file);
5086
5087	return fd;
5088}
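/*
 * Illustrative userspace sketch (not part of this file): reading a VM's
 * binary stats.  The returned fd is read-only and supports pread(); the
 * header locates the id string (hdr.id_offset), the descriptor table
 * (hdr.desc_offset) and the data block (hdr.data_offset).  vm_fd is
 * hypothetical and error handling is omitted.
 *
 *	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 */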
5089
5090#define SANITY_CHECK_MEM_REGION_FIELD(field)					\
5091do {										\
5092	BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=		\
5093		     offsetof(struct kvm_userspace_memory_region2, field));	\
5094	BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) !=		\
5095		     sizeof_field(struct kvm_userspace_memory_region2, field));	\
5096} while (0)
5097
5098static long kvm_vm_ioctl(struct file *filp,
5099			   unsigned int ioctl, unsigned long arg)
5100{
5101	struct kvm *kvm = filp->private_data;
5102	void __user *argp = (void __user *)arg;
5103	int r;
5104
5105	if (kvm->mm != current->mm || kvm->vm_dead)
5106		return -EIO;
5107	switch (ioctl) {
5108	case KVM_CREATE_VCPU:
5109		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5110		break;
5111	case KVM_ENABLE_CAP: {
5112		struct kvm_enable_cap cap;
5113
5114		r = -EFAULT;
5115		if (copy_from_user(&cap, argp, sizeof(cap)))
5116			goto out;
5117		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5118		break;
5119	}
5120	case KVM_SET_USER_MEMORY_REGION2:
5121	case KVM_SET_USER_MEMORY_REGION: {
5122		struct kvm_userspace_memory_region2 mem;
5123		unsigned long size;
5124
5125		if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5126			/*
5127			 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5128			 * accessed, but avoid leaking kernel memory in case of a bug.
5129			 */
5130			memset(&mem, 0, sizeof(mem));
5131			size = sizeof(struct kvm_userspace_memory_region);
5132		} else {
5133			size = sizeof(struct kvm_userspace_memory_region2);
5134		}
5135
5136		/* Ensure the common parts of the two structs are identical. */
5137		SANITY_CHECK_MEM_REGION_FIELD(slot);
5138		SANITY_CHECK_MEM_REGION_FIELD(flags);
5139		SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5140		SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5141		SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5142
5143		r = -EFAULT;
5144		if (copy_from_user(&mem, argp, size))
5145			goto out;
5146
5147		r = -EINVAL;
5148		if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5149		    (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5150			goto out;
5151
5152		r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5153		break;
5154	}
5155	case KVM_GET_DIRTY_LOG: {
5156		struct kvm_dirty_log log;
5157
5158		r = -EFAULT;
5159		if (copy_from_user(&log, argp, sizeof(log)))
5160			goto out;
5161		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5162		break;
5163	}
5164#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5165	case KVM_CLEAR_DIRTY_LOG: {
5166		struct kvm_clear_dirty_log log;
5167
5168		r = -EFAULT;
5169		if (copy_from_user(&log, argp, sizeof(log)))
5170			goto out;
5171		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5172		break;
5173	}
5174#endif
5175#ifdef CONFIG_KVM_MMIO
5176	case KVM_REGISTER_COALESCED_MMIO: {
5177		struct kvm_coalesced_mmio_zone zone;
5178
5179		r = -EFAULT;
5180		if (copy_from_user(&zone, argp, sizeof(zone)))
5181			goto out;
5182		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5183		break;
5184	}
5185	case KVM_UNREGISTER_COALESCED_MMIO: {
5186		struct kvm_coalesced_mmio_zone zone;
5187
5188		r = -EFAULT;
5189		if (copy_from_user(&zone, argp, sizeof(zone)))
5190			goto out;
5191		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5192		break;
5193	}
5194#endif
5195	case KVM_IRQFD: {
5196		struct kvm_irqfd data;
5197
5198		r = -EFAULT;
5199		if (copy_from_user(&data, argp, sizeof(data)))
5200			goto out;
5201		r = kvm_irqfd(kvm, &data);
5202		break;
5203	}
5204	case KVM_IOEVENTFD: {
5205		struct kvm_ioeventfd data;
5206
5207		r = -EFAULT;
5208		if (copy_from_user(&data, argp, sizeof(data)))
5209			goto out;
5210		r = kvm_ioeventfd(kvm, &data);
5211		break;
5212	}
5213#ifdef CONFIG_HAVE_KVM_MSI
5214	case KVM_SIGNAL_MSI: {
5215		struct kvm_msi msi;
5216
5217		r = -EFAULT;
5218		if (copy_from_user(&msi, argp, sizeof(msi)))
5219			goto out;
5220		r = kvm_send_userspace_msi(kvm, &msi);
5221		break;
5222	}
5223#endif
5224#ifdef __KVM_HAVE_IRQ_LINE
5225	case KVM_IRQ_LINE_STATUS:
5226	case KVM_IRQ_LINE: {
5227		struct kvm_irq_level irq_event;
5228
5229		r = -EFAULT;
5230		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5231			goto out;
5232
5233		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5234					ioctl == KVM_IRQ_LINE_STATUS);
5235		if (r)
5236			goto out;
5237
5238		r = -EFAULT;
5239		if (ioctl == KVM_IRQ_LINE_STATUS) {
5240			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5241				goto out;
5242		}
5243
5244		r = 0;
5245		break;
5246	}
5247#endif
5248#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5249	case KVM_SET_GSI_ROUTING: {
5250		struct kvm_irq_routing routing;
5251		struct kvm_irq_routing __user *urouting;
5252		struct kvm_irq_routing_entry *entries = NULL;
5253
5254		r = -EFAULT;
5255		if (copy_from_user(&routing, argp, sizeof(routing)))
5256			goto out;
5257		r = -EINVAL;
5258		if (!kvm_arch_can_set_irq_routing(kvm))
5259			goto out;
5260		if (routing.nr > KVM_MAX_IRQ_ROUTES)
5261			goto out;
5262		if (routing.flags)
5263			goto out;
5264		if (routing.nr) {
5265			urouting = argp;
5266			entries = vmemdup_array_user(urouting->entries,
5267						     routing.nr, sizeof(*entries));
5268			if (IS_ERR(entries)) {
5269				r = PTR_ERR(entries);
5270				goto out;
5271			}
5272		}
5273		r = kvm_set_irq_routing(kvm, entries, routing.nr,
5274					routing.flags);
5275		kvfree(entries);
5276		break;
5277	}
5278#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5279#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5280	case KVM_SET_MEMORY_ATTRIBUTES: {
5281		struct kvm_memory_attributes attrs;
5282
5283		r = -EFAULT;
5284		if (copy_from_user(&attrs, argp, sizeof(attrs)))
5285			goto out;
5286
5287		r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5288		break;
5289	}
5290#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5291	case KVM_CREATE_DEVICE: {
5292		struct kvm_create_device cd;
5293
5294		r = -EFAULT;
5295		if (copy_from_user(&cd, argp, sizeof(cd)))
5296			goto out;
5297
5298		r = kvm_ioctl_create_device(kvm, &cd);
5299		if (r)
5300			goto out;
5301
5302		r = -EFAULT;
5303		if (copy_to_user(argp, &cd, sizeof(cd)))
5304			goto out;
5305
5306		r = 0;
5307		break;
5308	}
5309	case KVM_CHECK_EXTENSION:
5310		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5311		break;
5312	case KVM_RESET_DIRTY_RINGS:
5313		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5314		break;
5315	case KVM_GET_STATS_FD:
5316		r = kvm_vm_ioctl_get_stats_fd(kvm);
5317		break;
5318#ifdef CONFIG_KVM_PRIVATE_MEM
5319	case KVM_CREATE_GUEST_MEMFD: {
5320		struct kvm_create_guest_memfd guest_memfd;
5321
5322		r = -EFAULT;
5323		if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5324			goto out;
5325
5326		r = kvm_gmem_create(kvm, &guest_memfd);
5327		break;
5328	}
5329#endif
5330	default:
5331		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5332	}
5333out:
5334	return r;
5335}
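/*
 * Illustrative userspace sketch (not part of this file): mapping 2 MiB of
 * host memory into the guest with the extended memory-region ioctl.  The
 * host_buf pointer is hypothetical and must be a valid, page-aligned
 * userspace mapping; slot 0 and the guest address are arbitrary examples.
 *
 *	struct kvm_userspace_memory_region2 region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 0x200000,
 *		.userspace_addr  = (__u64)(unsigned long)host_buf,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
 */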
5336
5337#ifdef CONFIG_KVM_COMPAT
5338struct compat_kvm_dirty_log {
5339	__u32 slot;
5340	__u32 padding1;
5341	union {
5342		compat_uptr_t dirty_bitmap; /* one bit per page */
5343		__u64 padding2;
5344	};
5345};
5346
5347struct compat_kvm_clear_dirty_log {
5348	__u32 slot;
5349	__u32 num_pages;
5350	__u64 first_page;
5351	union {
5352		compat_uptr_t dirty_bitmap; /* one bit per page */
5353		__u64 padding2;
5354	};
5355};
5356
5357long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5358				     unsigned long arg)
5359{
5360	return -ENOTTY;
5361}
5362
5363static long kvm_vm_compat_ioctl(struct file *filp,
5364			   unsigned int ioctl, unsigned long arg)
5365{
5366	struct kvm *kvm = filp->private_data;
5367	int r;
5368
5369	if (kvm->mm != current->mm || kvm->vm_dead)
5370		return -EIO;
5371
5372	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5373	if (r != -ENOTTY)
5374		return r;
5375
5376	switch (ioctl) {
5377#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5378	case KVM_CLEAR_DIRTY_LOG: {
5379		struct compat_kvm_clear_dirty_log compat_log;
5380		struct kvm_clear_dirty_log log;
5381
5382		if (copy_from_user(&compat_log, (void __user *)arg,
5383				   sizeof(compat_log)))
5384			return -EFAULT;
5385		log.slot	 = compat_log.slot;
5386		log.num_pages	 = compat_log.num_pages;
5387		log.first_page	 = compat_log.first_page;
5388		log.padding2	 = compat_log.padding2;
5389		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5390
5391		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5392		break;
5393	}
5394#endif
5395	case KVM_GET_DIRTY_LOG: {
5396		struct compat_kvm_dirty_log compat_log;
5397		struct kvm_dirty_log log;
5398
5399		if (copy_from_user(&compat_log, (void __user *)arg,
5400				   sizeof(compat_log)))
5401			return -EFAULT;
5402		log.slot	 = compat_log.slot;
5403		log.padding1	 = compat_log.padding1;
5404		log.padding2	 = compat_log.padding2;
5405		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5406
5407		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5408		break;
5409	}
5410	default:
5411		r = kvm_vm_ioctl(filp, ioctl, arg);
5412	}
5413	return r;
5414}
5415#endif
5416
5417static struct file_operations kvm_vm_fops = {
5418	.release        = kvm_vm_release,
5419	.unlocked_ioctl = kvm_vm_ioctl,
5420	.llseek		= noop_llseek,
5421	KVM_COMPAT(kvm_vm_compat_ioctl),
5422};
5423
5424bool file_is_kvm(struct file *file)
5425{
5426	return file && file->f_op == &kvm_vm_fops;
5427}
5428EXPORT_SYMBOL_GPL(file_is_kvm);
5429
5430static int kvm_dev_ioctl_create_vm(unsigned long type)
5431{
5432	char fdname[ITOA_MAX_LEN + 1];
5433	int r, fd;
5434	struct kvm *kvm;
5435	struct file *file;
5436
5437	fd = get_unused_fd_flags(O_CLOEXEC);
5438	if (fd < 0)
5439		return fd;
5440
5441	snprintf(fdname, sizeof(fdname), "%d", fd);
5442
5443	kvm = kvm_create_vm(type, fdname);
5444	if (IS_ERR(kvm)) {
5445		r = PTR_ERR(kvm);
5446		goto put_fd;
5447	}
5448
5449	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5450	if (IS_ERR(file)) {
5451		r = PTR_ERR(file);
5452		goto put_kvm;
5453	}
5454
5455	/*
5456	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5457	 * already set, with ->release() being kvm_vm_release().  In error
5458	 * cases it will be called by the final fput(file) and will take
5459	 * care of doing kvm_put_kvm(kvm).
5460	 */
5461	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5462
5463	fd_install(fd, file);
5464	return fd;
5465
5466put_kvm:
5467	kvm_put_kvm(kvm);
5468put_fd:
5469	put_unused_fd(fd);
5470	return r;
5471}
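/*
 * Illustrative userspace sketch (not part of this file): creating a VM from
 * /dev/kvm.  The ioctl argument selects the machine type; 0 requests the
 * default type on most architectures.  Error handling is omitted.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */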
5472
5473static long kvm_dev_ioctl(struct file *filp,
5474			  unsigned int ioctl, unsigned long arg)
5475{
5476	int r = -EINVAL;
5477
5478	switch (ioctl) {
5479	case KVM_GET_API_VERSION:
5480		if (arg)
5481			goto out;
5482		r = KVM_API_VERSION;
5483		break;
5484	case KVM_CREATE_VM:
5485		r = kvm_dev_ioctl_create_vm(arg);
5486		break;
5487	case KVM_CHECK_EXTENSION:
5488		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5489		break;
5490	case KVM_GET_VCPU_MMAP_SIZE:
5491		if (arg)
5492			goto out;
5493		r = PAGE_SIZE;     /* struct kvm_run */
5494#ifdef CONFIG_X86
5495		r += PAGE_SIZE;    /* pio data page */
5496#endif
5497#ifdef CONFIG_KVM_MMIO
5498		r += PAGE_SIZE;    /* coalesced mmio ring page */
5499#endif
5500		break;
5501	default:
5502		return kvm_arch_dev_ioctl(filp, ioctl, arg);
5503	}
5504out:
5505	return r;
5506}
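/*
 * Illustrative userspace sketch (not part of this file): sizing and mapping
 * the shared kvm_run area of a vCPU.  KVM_GET_VCPU_MMAP_SIZE is issued on
 * the /dev/kvm fd, the mmap() on the vCPU fd; kvm_fd and vm_fd are assumed
 * to exist already and error handling is omitted.
 *
 *	int run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	int vcpu_fd  = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */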
5507
5508static struct file_operations kvm_chardev_ops = {
5509	.unlocked_ioctl = kvm_dev_ioctl,
5510	.llseek		= noop_llseek,
5511	KVM_COMPAT(kvm_dev_ioctl),
5512};
5513
5514static struct miscdevice kvm_dev = {
5515	KVM_MINOR,
5516	"kvm",
5517	&kvm_chardev_ops,
5518};
5519
5520#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5521__visible bool kvm_rebooting;
5522EXPORT_SYMBOL_GPL(kvm_rebooting);
5523
5524static DEFINE_PER_CPU(bool, hardware_enabled);
5525static int kvm_usage_count;
5526
5527static int __hardware_enable_nolock(void)
5528{
5529	if (__this_cpu_read(hardware_enabled))
5530		return 0;
5531
5532	if (kvm_arch_hardware_enable()) {
5533		pr_info("kvm: enabling virtualization on CPU%d failed\n",
5534			raw_smp_processor_id());
5535		return -EIO;
5536	}
5537
5538	__this_cpu_write(hardware_enabled, true);
5539	return 0;
5540}
5541
5542static void hardware_enable_nolock(void *failed)
5543{
5544	if (__hardware_enable_nolock())
5545		atomic_inc(failed);
5546}
5547
5548static int kvm_online_cpu(unsigned int cpu)
5549{
5550	int ret = 0;
5551
5552	/*
5553	 * Abort the CPU online process if hardware virtualization cannot
5554	 * be enabled. Otherwise running VMs would encounter unrecoverable
5555	 * errors when scheduled to this CPU.
5556	 */
5557	mutex_lock(&kvm_lock);
5558	if (kvm_usage_count)
5559		ret = __hardware_enable_nolock();
5560	mutex_unlock(&kvm_lock);
5561	return ret;
5562}
5563
5564static void hardware_disable_nolock(void *junk)
5565{
5566	/*
5567	 * Note, hardware_disable_all_nolock() tells all online CPUs to disable
5568	 * hardware, not just CPUs that successfully enabled hardware!
5569	 */
5570	if (!__this_cpu_read(hardware_enabled))
5571		return;
5572
5573	kvm_arch_hardware_disable();
5574
5575	__this_cpu_write(hardware_enabled, false);
5576}
5577
5578static int kvm_offline_cpu(unsigned int cpu)
5579{
5580	mutex_lock(&kvm_lock);
5581	if (kvm_usage_count)
5582		hardware_disable_nolock(NULL);
5583	mutex_unlock(&kvm_lock);
5584	return 0;
5585}
5586
5587static void hardware_disable_all_nolock(void)
5588{
5589	BUG_ON(!kvm_usage_count);
5590
5591	kvm_usage_count--;
5592	if (!kvm_usage_count)
5593		on_each_cpu(hardware_disable_nolock, NULL, 1);
5594}
5595
5596static void hardware_disable_all(void)
5597{
5598	cpus_read_lock();
5599	mutex_lock(&kvm_lock);
5600	hardware_disable_all_nolock();
5601	mutex_unlock(&kvm_lock);
5602	cpus_read_unlock();
5603}
5604
5605static int hardware_enable_all(void)
5606{
5607	atomic_t failed = ATOMIC_INIT(0);
5608	int r;
5609
5610	/*
5611	 * Do not enable hardware virtualization if the system is going down.
5612	 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5613	 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
5614	 * after kvm_reboot() is called.  Note, this relies on system_state
5615	 * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
5616	 * hook instead of registering a dedicated reboot notifier (the latter
5617	 * runs before system_state is updated).
5618	 */
5619	if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5620	    system_state == SYSTEM_RESTART)
5621		return -EBUSY;
5622
5623	/*
5624	 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
5625	 * is called, and so on_each_cpu() between them includes the CPU that
5626	 * is being onlined.  As a result, hardware_enable_nolock() may get
5627	 * invoked before kvm_online_cpu(), which also enables hardware if the
5628	 * usage count is non-zero.  Disable CPU hotplug to avoid attempting to
5629	 * enable hardware multiple times.
5630	 */
5631	cpus_read_lock();
5632	mutex_lock(&kvm_lock);
5633
5634	r = 0;
5635
5636	kvm_usage_count++;
5637	if (kvm_usage_count == 1) {
5638		on_each_cpu(hardware_enable_nolock, &failed, 1);
5639
5640		if (atomic_read(&failed)) {
5641			hardware_disable_all_nolock();
5642			r = -EBUSY;
5643		}
5644	}
5645
5646	mutex_unlock(&kvm_lock);
5647	cpus_read_unlock();
5648
5649	return r;
5650}
5651
5652static void kvm_shutdown(void)
5653{
5654	/*
5655	 * Disable hardware virtualization and set kvm_rebooting to indicate
5656	 * that KVM has asynchronously disabled hardware virtualization, i.e.
5657	 * that relevant errors and exceptions aren't entirely unexpected.
5658	 * Some flavors of hardware virtualization need to be disabled before
5659	 * transferring control to firmware (to perform shutdown/reboot), e.g.
5660	 * on x86, virtualization can block INIT interrupts, which are used by
5661	 * firmware to pull APs back under firmware control.  Note, this path
5662	 * is used for both shutdown and reboot scenarios, i.e. neither name is
5663	 * 100% comprehensive.
5664	 */
5665	pr_info("kvm: exiting hardware virtualization\n");
5666	kvm_rebooting = true;
5667	on_each_cpu(hardware_disable_nolock, NULL, 1);
5668}
5669
5670static int kvm_suspend(void)
5671{
5672	/*
5673	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5674	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
5675	 * is stable.  Assert that kvm_lock is not held to ensure the system
5676	 * isn't suspended while KVM is enabling hardware.  Hardware enabling
5677	 * can be preempted, but the task cannot be frozen until it has dropped
5678	 * all locks (userspace tasks are frozen via a fake signal).
5679	 */
5680	lockdep_assert_not_held(&kvm_lock);
5681	lockdep_assert_irqs_disabled();
5682
5683	if (kvm_usage_count)
5684		hardware_disable_nolock(NULL);
5685	return 0;
5686}
5687
5688static void kvm_resume(void)
5689{
5690	lockdep_assert_not_held(&kvm_lock);
5691	lockdep_assert_irqs_disabled();
5692
5693	if (kvm_usage_count)
5694		WARN_ON_ONCE(__hardware_enable_nolock());
5695}
5696
5697static struct syscore_ops kvm_syscore_ops = {
5698	.suspend = kvm_suspend,
5699	.resume = kvm_resume,
5700	.shutdown = kvm_shutdown,
5701};
5702#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5703static int hardware_enable_all(void)
5704{
5705	return 0;
5706}
5707
5708static void hardware_disable_all(void)
5709{
5710
5711}
5712#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5713
5714static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5715{
5716	if (dev->ops->destructor)
5717		dev->ops->destructor(dev);
5718}
5719
5720static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5721{
5722	int i;
5723
5724	for (i = 0; i < bus->dev_count; i++) {
5725		struct kvm_io_device *pos = bus->range[i].dev;
5726
5727		kvm_iodevice_destructor(pos);
5728	}
5729	kfree(bus);
5730}
5731
5732static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5733				 const struct kvm_io_range *r2)
5734{
5735	gpa_t addr1 = r1->addr;
5736	gpa_t addr2 = r2->addr;
5737
5738	if (addr1 < addr2)
5739		return -1;
5740
5741	/* If r2->len == 0, match the exact address.  If r2->len != 0,
5742	 * accept any overlapping write.  Any order is acceptable for
5743	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5744	 * we process all of them.
5745	 */
5746	if (r2->len) {
5747		addr1 += r1->len;
5748		addr2 += r2->len;
5749	}
5750
5751	if (addr1 > addr2)
5752		return 1;
5753
5754	return 0;
5755}
5756
5757static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5758{
5759	return kvm_io_bus_cmp(p1, p2);
5760}
5761
5762static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5763			     gpa_t addr, int len)
5764{
5765	struct kvm_io_range *range, key;
5766	int off;
5767
5768	key = (struct kvm_io_range) {
5769		.addr = addr,
5770		.len = len,
5771	};
5772
5773	range = bsearch(&key, bus->range, bus->dev_count,
5774			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5775	if (range == NULL)
5776		return -ENOENT;
5777
5778	off = range - bus->range;
5779
5780	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5781		off--;
5782
5783	return off;
5784}
5785
5786static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5787			      struct kvm_io_range *range, const void *val)
5788{
5789	int idx;
5790
5791	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5792	if (idx < 0)
5793		return -EOPNOTSUPP;
5794
5795	while (idx < bus->dev_count &&
5796		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5797		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5798					range->len, val))
5799			return idx;
5800		idx++;
5801	}
5802
5803	return -EOPNOTSUPP;
5804}
5805
5806/* kvm_io_bus_write - called under kvm->slots_lock */
5807int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5808		     int len, const void *val)
5809{
5810	struct kvm_io_bus *bus;
5811	struct kvm_io_range range;
5812	int r;
5813
5814	range = (struct kvm_io_range) {
5815		.addr = addr,
5816		.len = len,
5817	};
5818
5819	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5820	if (!bus)
5821		return -ENOMEM;
5822	r = __kvm_io_bus_write(vcpu, bus, &range, val);
5823	return r < 0 ? r : 0;
5824}
5825EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5826
5827/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5828int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5829			    gpa_t addr, int len, const void *val, long cookie)
5830{
5831	struct kvm_io_bus *bus;
5832	struct kvm_io_range range;
5833
5834	range = (struct kvm_io_range) {
5835		.addr = addr,
5836		.len = len,
5837	};
5838
5839	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5840	if (!bus)
5841		return -ENOMEM;
5842
5843	/* First try the device referenced by cookie. */
5844	if ((cookie >= 0) && (cookie < bus->dev_count) &&
5845	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5846		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5847					val))
5848			return cookie;
5849
5850	/*
5851	 * cookie contained garbage; fall back to search and return the
5852	 * correct cookie value.
5853	 */
5854	return __kvm_io_bus_write(vcpu, bus, &range, val);
5855}
5856
5857static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5858			     struct kvm_io_range *range, void *val)
5859{
5860	int idx;
5861
5862	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5863	if (idx < 0)
5864		return -EOPNOTSUPP;
5865
5866	while (idx < bus->dev_count &&
5867		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5868		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5869				       range->len, val))
5870			return idx;
5871		idx++;
5872	}
5873
5874	return -EOPNOTSUPP;
5875}
5876
5877/* kvm_io_bus_read - called under kvm->slots_lock */
5878int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5879		    int len, void *val)
5880{
5881	struct kvm_io_bus *bus;
5882	struct kvm_io_range range;
5883	int r;
5884
5885	range = (struct kvm_io_range) {
5886		.addr = addr,
5887		.len = len,
5888	};
5889
5890	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5891	if (!bus)
5892		return -ENOMEM;
5893	r = __kvm_io_bus_read(vcpu, bus, &range, val);
5894	return r < 0 ? r : 0;
5895}
5896
5897int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5898			    int len, struct kvm_io_device *dev)
5899{
5900	int i;
5901	struct kvm_io_bus *new_bus, *bus;
5902	struct kvm_io_range range;
5903
5904	lockdep_assert_held(&kvm->slots_lock);
5905
5906	bus = kvm_get_bus(kvm, bus_idx);
5907	if (!bus)
5908		return -ENOMEM;
5909
5910	/* exclude ioeventfd which is limited by maximum fd */
5911	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5912		return -ENOSPC;
5913
5914	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5915			  GFP_KERNEL_ACCOUNT);
5916	if (!new_bus)
5917		return -ENOMEM;
5918
5919	range = (struct kvm_io_range) {
5920		.addr = addr,
5921		.len = len,
5922		.dev = dev,
5923	};
5924
5925	for (i = 0; i < bus->dev_count; i++)
5926		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5927			break;
5928
5929	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5930	new_bus->dev_count++;
5931	new_bus->range[i] = range;
5932	memcpy(new_bus->range + i + 1, bus->range + i,
5933		(bus->dev_count - i) * sizeof(struct kvm_io_range));
5934	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5935	synchronize_srcu_expedited(&kvm->srcu);
5936	kfree(bus);
5937
5938	return 0;
5939}
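/*
 * Minimal in-kernel usage sketch (illustrative only): a device that embeds a
 * struct kvm_io_device registers itself on the MMIO bus under slots_lock.
 * The my_dev/my_dev_ops names and the gpa/len values are hypothetical.
 *
 *	kvm_iodevice_init(&my_dev->dev, &my_dev_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, gpa, len, &my_dev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */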
5940
5941int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5942			      struct kvm_io_device *dev)
5943{
5944	int i;
5945	struct kvm_io_bus *new_bus, *bus;
5946
5947	lockdep_assert_held(&kvm->slots_lock);
5948
5949	bus = kvm_get_bus(kvm, bus_idx);
5950	if (!bus)
5951		return 0;
5952
5953	for (i = 0; i < bus->dev_count; i++) {
5954		if (bus->range[i].dev == dev) {
5955			break;
5956		}
5957	}
5958
5959	if (i == bus->dev_count)
5960		return 0;
5961
5962	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5963			  GFP_KERNEL_ACCOUNT);
5964	if (new_bus) {
5965		memcpy(new_bus, bus, struct_size(bus, range, i));
5966		new_bus->dev_count--;
5967		memcpy(new_bus->range + i, bus->range + i + 1,
5968				flex_array_size(new_bus, range, new_bus->dev_count - i));
5969	}
5970
5971	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5972	synchronize_srcu_expedited(&kvm->srcu);
5973
5974	/*
5975	 * If a NULL bus is installed, destroy the old bus, including all the
5976	 * attached devices. Otherwise, destroy the caller's device only.
5977	 */
5978	if (!new_bus) {
5979		pr_err("kvm: failed to shrink bus, removing it completely\n");
5980		kvm_io_bus_destroy(bus);
5981		return -ENOMEM;
5982	}
5983
5984	kvm_iodevice_destructor(dev);
5985	kfree(bus);
5986	return 0;
5987}
5988
5989struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5990					 gpa_t addr)
5991{
5992	struct kvm_io_bus *bus;
5993	int dev_idx, srcu_idx;
5994	struct kvm_io_device *iodev = NULL;
5995
5996	srcu_idx = srcu_read_lock(&kvm->srcu);
5997
5998	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
5999	if (!bus)
6000		goto out_unlock;
6001
6002	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6003	if (dev_idx < 0)
6004		goto out_unlock;
6005
6006	iodev = bus->range[dev_idx].dev;
6007
6008out_unlock:
6009	srcu_read_unlock(&kvm->srcu, srcu_idx);
6010
6011	return iodev;
6012}
6013EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
6014
6015static int kvm_debugfs_open(struct inode *inode, struct file *file,
6016			   int (*get)(void *, u64 *), int (*set)(void *, u64),
6017			   const char *fmt)
6018{
6019	int ret;
6020	struct kvm_stat_data *stat_data = inode->i_private;
6021
6022	/*
6023	 * The debugfs files are a reference to the kvm struct which
6024	 * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
6025	 * avoids the race between open and the removal of the debugfs directory.
6026	 */
6027	if (!kvm_get_kvm_safe(stat_data->kvm))
6028		return -ENOENT;
6029
6030	ret = simple_attr_open(inode, file, get,
6031			       kvm_stats_debugfs_mode(stat_data->desc) & 0222
6032			       ? set : NULL, fmt);
6033	if (ret)
6034		kvm_put_kvm(stat_data->kvm);
6035
6036	return ret;
6037}
6038
6039static int kvm_debugfs_release(struct inode *inode, struct file *file)
6040{
6041	struct kvm_stat_data *stat_data = inode->i_private;
6042
6043	simple_attr_release(inode, file);
6044	kvm_put_kvm(stat_data->kvm);
6045
6046	return 0;
6047}
6048
6049static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6050{
6051	*val = *(u64 *)((void *)(&kvm->stat) + offset);
6052
6053	return 0;
6054}
6055
6056static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6057{
6058	*(u64 *)((void *)(&kvm->stat) + offset) = 0;
6059
6060	return 0;
6061}
6062
6063static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6064{
6065	unsigned long i;
6066	struct kvm_vcpu *vcpu;
6067
6068	*val = 0;
6069
6070	kvm_for_each_vcpu(i, vcpu, kvm)
6071		*val += *(u64 *)((void *)(&vcpu->stat) + offset);
6072
6073	return 0;
6074}
6075
6076static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6077{
6078	unsigned long i;
6079	struct kvm_vcpu *vcpu;
6080
6081	kvm_for_each_vcpu(i, vcpu, kvm)
6082		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6083
6084	return 0;
6085}
6086
6087static int kvm_stat_data_get(void *data, u64 *val)
6088{
6089	int r = -EFAULT;
6090	struct kvm_stat_data *stat_data = data;
6091
6092	switch (stat_data->kind) {
6093	case KVM_STAT_VM:
6094		r = kvm_get_stat_per_vm(stat_data->kvm,
6095					stat_data->desc->desc.offset, val);
6096		break;
6097	case KVM_STAT_VCPU:
6098		r = kvm_get_stat_per_vcpu(stat_data->kvm,
6099					  stat_data->desc->desc.offset, val);
6100		break;
6101	}
6102
6103	return r;
6104}
6105
6106static int kvm_stat_data_clear(void *data, u64 val)
6107{
6108	int r = -EFAULT;
6109	struct kvm_stat_data *stat_data = data;
6110
6111	if (val)
6112		return -EINVAL;
6113
6114	switch (stat_data->kind) {
6115	case KVM_STAT_VM:
6116		r = kvm_clear_stat_per_vm(stat_data->kvm,
6117					  stat_data->desc->desc.offset);
6118		break;
6119	case KVM_STAT_VCPU:
6120		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6121					    stat_data->desc->desc.offset);
6122		break;
6123	}
6124
6125	return r;
6126}
6127
6128static int kvm_stat_data_open(struct inode *inode, struct file *file)
6129{
6130	__simple_attr_check_format("%llu\n", 0ull);
6131	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6132				kvm_stat_data_clear, "%llu\n");
6133}
6134
6135static const struct file_operations stat_fops_per_vm = {
6136	.owner = THIS_MODULE,
6137	.open = kvm_stat_data_open,
6138	.release = kvm_debugfs_release,
6139	.read = simple_attr_read,
6140	.write = simple_attr_write,
6141	.llseek = no_llseek,
6142};
6143
6144static int vm_stat_get(void *_offset, u64 *val)
6145{
6146	unsigned offset = (long)_offset;
6147	struct kvm *kvm;
6148	u64 tmp_val;
6149
6150	*val = 0;
6151	mutex_lock(&kvm_lock);
6152	list_for_each_entry(kvm, &vm_list, vm_list) {
6153		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6154		*val += tmp_val;
6155	}
6156	mutex_unlock(&kvm_lock);
6157	return 0;
6158}
6159
6160static int vm_stat_clear(void *_offset, u64 val)
6161{
6162	unsigned offset = (long)_offset;
6163	struct kvm *kvm;
6164
6165	if (val)
6166		return -EINVAL;
6167
6168	mutex_lock(&kvm_lock);
6169	list_for_each_entry(kvm, &vm_list, vm_list) {
6170		kvm_clear_stat_per_vm(kvm, offset);
6171	}
6172	mutex_unlock(&kvm_lock);
6173
6174	return 0;
6175}
6176
6177DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6178DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6179
6180static int vcpu_stat_get(void *_offset, u64 *val)
6181{
6182	unsigned offset = (long)_offset;
6183	struct kvm *kvm;
6184	u64 tmp_val;
6185
6186	*val = 0;
6187	mutex_lock(&kvm_lock);
6188	list_for_each_entry(kvm, &vm_list, vm_list) {
6189		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6190		*val += tmp_val;
6191	}
6192	mutex_unlock(&kvm_lock);
6193	return 0;
6194}
6195
6196static int vcpu_stat_clear(void *_offset, u64 val)
6197{
6198	unsigned offset = (long)_offset;
6199	struct kvm *kvm;
6200
6201	if (val)
6202		return -EINVAL;
6203
6204	mutex_lock(&kvm_lock);
6205	list_for_each_entry(kvm, &vm_list, vm_list) {
6206		kvm_clear_stat_per_vcpu(kvm, offset);
6207	}
6208	mutex_unlock(&kvm_lock);
6209
6210	return 0;
6211}
6212
6213DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6214			"%llu\n");
6215DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6216
6217static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6218{
6219	struct kobj_uevent_env *env;
6220	unsigned long long created, active;
6221
6222	if (!kvm_dev.this_device || !kvm)
6223		return;
6224
6225	mutex_lock(&kvm_lock);
6226	if (type == KVM_EVENT_CREATE_VM) {
6227		kvm_createvm_count++;
6228		kvm_active_vms++;
6229	} else if (type == KVM_EVENT_DESTROY_VM) {
6230		kvm_active_vms--;
6231	}
6232	created = kvm_createvm_count;
6233	active = kvm_active_vms;
6234	mutex_unlock(&kvm_lock);
6235
6236	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
6237	if (!env)
6238		return;
6239
6240	add_uevent_var(env, "CREATED=%llu", created);
6241	add_uevent_var(env, "COUNT=%llu", active);
6242
6243	if (type == KVM_EVENT_CREATE_VM) {
6244		add_uevent_var(env, "EVENT=create");
6245		kvm->userspace_pid = task_pid_nr(current);
6246	} else if (type == KVM_EVENT_DESTROY_VM) {
6247		add_uevent_var(env, "EVENT=destroy");
6248	}
6249	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6250
6251	if (!IS_ERR(kvm->debugfs_dentry)) {
6252		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
6253
6254		if (p) {
6255			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6256			if (!IS_ERR(tmp))
6257				add_uevent_var(env, "STATS_PATH=%s", tmp);
6258			kfree(p);
6259		}
6260	}
6261	/* no need for checks, since we are adding at most only 5 keys */
6262	env->envp[env->envp_idx++] = NULL;
6263	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6264	kfree(env);
6265}
6266
6267static void kvm_init_debug(void)
6268{
6269	const struct file_operations *fops;
6270	const struct _kvm_stats_desc *pdesc;
6271	int i;
6272
6273	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6274
6275	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6276		pdesc = &kvm_vm_stats_desc[i];
6277		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6278			fops = &vm_stat_fops;
6279		else
6280			fops = &vm_stat_readonly_fops;
6281		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6282				kvm_debugfs_dir,
6283				(void *)(long)pdesc->desc.offset, fops);
6284	}
6285
6286	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6287		pdesc = &kvm_vcpu_stats_desc[i];
6288		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6289			fops = &vcpu_stat_fops;
6290		else
6291			fops = &vcpu_stat_readonly_fops;
6292		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6293				kvm_debugfs_dir,
6294				(void *)(long)pdesc->desc.offset, fops);
6295	}
6296}
6297
6298static inline
6299struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6300{
6301	return container_of(pn, struct kvm_vcpu, preempt_notifier);
6302}
6303
6304static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6305{
6306	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6307
6308	WRITE_ONCE(vcpu->preempted, false);
6309	WRITE_ONCE(vcpu->ready, false);
6310
6311	__this_cpu_write(kvm_running_vcpu, vcpu);
6312	kvm_arch_sched_in(vcpu, cpu);
6313	kvm_arch_vcpu_load(vcpu, cpu);
6314}
6315
6316static void kvm_sched_out(struct preempt_notifier *pn,
6317			  struct task_struct *next)
6318{
6319	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6320
6321	if (current->on_rq) {
6322		WRITE_ONCE(vcpu->preempted, true);
6323		WRITE_ONCE(vcpu->ready, true);
6324	}
6325	kvm_arch_vcpu_put(vcpu);
6326	__this_cpu_write(kvm_running_vcpu, NULL);
6327}
6328
6329/**
6330 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6331 *
6332 * We can disable preemption locally around accessing the per-CPU variable,
6333 * and use the resolved vcpu pointer after enabling preemption again,
6334 * because even if the current thread is migrated to another CPU, reading
6335 * the per-CPU value later will give us the same value as we update the
6336 * per-CPU variable in the preempt notifier handlers.
6337 */
6338struct kvm_vcpu *kvm_get_running_vcpu(void)
6339{
6340	struct kvm_vcpu *vcpu;
6341
6342	preempt_disable();
6343	vcpu = __this_cpu_read(kvm_running_vcpu);
6344	preempt_enable();
6345
6346	return vcpu;
6347}
6348EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
6349
6350/**
6351 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6352 */
6353struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6354{
6355	return &kvm_running_vcpu;
6356}
6357
6358#ifdef CONFIG_GUEST_PERF_EVENTS
6359static unsigned int kvm_guest_state(void)
6360{
6361	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6362	unsigned int state;
6363
6364	if (!kvm_arch_pmi_in_guest(vcpu))
6365		return 0;
6366
6367	state = PERF_GUEST_ACTIVE;
6368	if (!kvm_arch_vcpu_in_kernel(vcpu))
6369		state |= PERF_GUEST_USER;
6370
6371	return state;
6372}
6373
6374static unsigned long kvm_guest_get_ip(void)
6375{
6376	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6377
6378	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6379	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6380		return 0;
6381
6382	return kvm_arch_vcpu_get_ip(vcpu);
6383}
6384
6385static struct perf_guest_info_callbacks kvm_guest_cbs = {
6386	.state			= kvm_guest_state,
6387	.get_ip			= kvm_guest_get_ip,
6388	.handle_intel_pt_intr	= NULL,
6389};
6390
6391void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
6392{
6393	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6394	perf_register_guest_info_callbacks(&kvm_guest_cbs);
6395}
6396void kvm_unregister_perf_callbacks(void)
6397{
6398	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6399}
6400#endif
6401
6402int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6403{
6404	int r;
6405	int cpu;
6406
6407#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6408	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
6409				      kvm_online_cpu, kvm_offline_cpu);
6410	if (r)
6411		return r;
6412
6413	register_syscore_ops(&kvm_syscore_ops);
6414#endif
6415
6416	/* A kmem cache lets us meet the alignment requirements of fx_save. */
6417	if (!vcpu_align)
6418		vcpu_align = __alignof__(struct kvm_vcpu);
6419	kvm_vcpu_cache =
6420		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6421					   SLAB_ACCOUNT,
6422					   offsetof(struct kvm_vcpu, arch),
6423					   offsetofend(struct kvm_vcpu, stats_id)
6424					   - offsetof(struct kvm_vcpu, arch),
6425					   NULL);
6426	if (!kvm_vcpu_cache) {
6427		r = -ENOMEM;
6428		goto err_vcpu_cache;
6429	}
6430
6431	for_each_possible_cpu(cpu) {
6432		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6433					    GFP_KERNEL, cpu_to_node(cpu))) {
6434			r = -ENOMEM;
6435			goto err_cpu_kick_mask;
6436		}
6437	}
6438
6439	r = kvm_irqfd_init();
6440	if (r)
6441		goto err_irqfd;
6442
6443	r = kvm_async_pf_init();
6444	if (r)
6445		goto err_async_pf;
6446
6447	kvm_chardev_ops.owner = module;
6448	kvm_vm_fops.owner = module;
6449	kvm_vcpu_fops.owner = module;
6450	kvm_device_fops.owner = module;
6451
6452	kvm_preempt_ops.sched_in = kvm_sched_in;
6453	kvm_preempt_ops.sched_out = kvm_sched_out;
6454
6455	kvm_init_debug();
6456
6457	r = kvm_vfio_ops_init();
6458	if (WARN_ON_ONCE(r))
6459		goto err_vfio;
6460
6461	kvm_gmem_init(module);
6462
6463	/*
6464	 * Registration _must_ be the very last thing done, as this exposes
6465	 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
6466	 */
6467	r = misc_register(&kvm_dev);
6468	if (r) {
6469		pr_err("kvm: misc device register failed\n");
6470		goto err_register;
6471	}
6472
6473	return 0;
6474
6475err_register:
6476	kvm_vfio_ops_exit();
6477err_vfio:
6478	kvm_async_pf_deinit();
6479err_async_pf:
6480	kvm_irqfd_exit();
6481err_irqfd:
6482err_cpu_kick_mask:
6483	for_each_possible_cpu(cpu)
6484		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6485	kmem_cache_destroy(kvm_vcpu_cache);
6486err_vcpu_cache:
6487#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6488	unregister_syscore_ops(&kvm_syscore_ops);
6489	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6490#endif
6491	return r;
6492}
6493EXPORT_SYMBOL_GPL(kvm_init);
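/*
 * Minimal arch-module usage sketch (illustrative only): once arch-specific
 * setup has succeeded, the architecture module hands its vCPU container size
 * and alignment to kvm_init().  my_arch_setup(), struct my_arch_vcpu and
 * my_arch_kvm_init() are hypothetical stand-ins for the real arch code.
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		int r = my_arch_setup();
 *
 *		if (r)
 *			return r;
 *
 *		return kvm_init(sizeof(struct my_arch_vcpu),
 *				__alignof__(struct my_arch_vcpu), THIS_MODULE);
 *	}
 *	module_init(my_arch_kvm_init);
 */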
6494
6495void kvm_exit(void)
6496{
6497	int cpu;
6498
6499	/*
6500	 * Note, unregistering /dev/kvm doesn't strictly need to come first,
6501	 * as fops_get(), a.k.a. try_module_get(), prevents acquiring references
6502	 * to KVM while the module is being stopped.
6503	 */
6504	misc_deregister(&kvm_dev);
6505
6506	debugfs_remove_recursive(kvm_debugfs_dir);
6507	for_each_possible_cpu(cpu)
6508		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6509	kmem_cache_destroy(kvm_vcpu_cache);
6510	kvm_vfio_ops_exit();
6511	kvm_async_pf_deinit();
6512#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6513	unregister_syscore_ops(&kvm_syscore_ops);
6514	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6515#endif
6516	kvm_irqfd_exit();
6517}
6518EXPORT_SYMBOL_GPL(kvm_exit);
6519
6520struct kvm_vm_worker_thread_context {
6521	struct kvm *kvm;
6522	struct task_struct *parent;
6523	struct completion init_done;
6524	kvm_vm_thread_fn_t thread_fn;
6525	uintptr_t data;
6526	int err;
6527};
6528
6529static int kvm_vm_worker_thread(void *context)
6530{
6531	/*
6532	 * The init_context is allocated on the stack of the parent thread, so
6533	 * we have to locally copy anything that is needed beyond initialization
6534	 */
6535	struct kvm_vm_worker_thread_context *init_context = context;
6536	struct task_struct *parent;
6537	struct kvm *kvm = init_context->kvm;
6538	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
6539	uintptr_t data = init_context->data;
6540	int err;
6541
6542	err = kthread_park(current);
6543	/* kthread_park(current) is never supposed to return an error */
6544	WARN_ON(err != 0);
6545	if (err)
6546		goto init_complete;
6547
6548	err = cgroup_attach_task_all(init_context->parent, current);
6549	if (err) {
6550		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
6551			__func__, err);
6552		goto init_complete;
6553	}
6554
6555	set_user_nice(current, task_nice(init_context->parent));
6556
6557init_complete:
6558	init_context->err = err;
6559	complete(&init_context->init_done);
6560	init_context = NULL;
6561
6562	if (err)
6563		goto out;
6564
6565	/* Wait to be woken up by the spawner before proceeding. */
6566	kthread_parkme();
6567
6568	if (!kthread_should_stop())
6569		err = thread_fn(kvm, data);
6570
6571out:
6572	/*
6573	 * Move kthread back to its original cgroup to prevent it lingering in
6574	 * the cgroup of the VM process, after the latter finishes its
6575	 * execution.
6576	 *
6577	 * kthread_stop() waits on the 'exited' completion condition which is
6578	 * set in exit_mm(), via mm_release(), in do_exit(). However, the
6579	 * kthread is removed from the cgroup in the cgroup_exit() which is
6580	 * called after the exit_mm(). This causes the kthread_stop() to return
6581	 * before the kthread actually quits the cgroup.
6582	 */
6583	rcu_read_lock();
6584	parent = rcu_dereference(current->real_parent);
6585	get_task_struct(parent);
6586	rcu_read_unlock();
6587	cgroup_attach_task_all(parent, current);
6588	put_task_struct(parent);
6589
6590	return err;
6591}
6592
6593int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6594				uintptr_t data, const char *name,
6595				struct task_struct **thread_ptr)
6596{
6597	struct kvm_vm_worker_thread_context init_context = {};
6598	struct task_struct *thread;
6599
6600	*thread_ptr = NULL;
6601	init_context.kvm = kvm;
6602	init_context.parent = current;
6603	init_context.thread_fn = thread_fn;
6604	init_context.data = data;
6605	init_completion(&init_context.init_done);
6606
6607	thread = kthread_run(kvm_vm_worker_thread, &init_context,
6608			     "%s-%d", name, task_pid_nr(current));
6609	if (IS_ERR(thread))
6610		return PTR_ERR(thread);
6611
6612	/* kthread_run is never supposed to return NULL */
6613	WARN_ON(thread == NULL);
6614
6615	wait_for_completion(&init_context.init_done);
6616
6617	if (!init_context.err)
6618		*thread_ptr = thread;
6619
6620	return init_context.err;
6621}