   1// SPDX-License-Identifier: GPL-2.0-only
   2
   3/*
   4 * Local APIC virtualization
   5 *
   6 * Copyright (C) 2006 Qumranet, Inc.
   7 * Copyright (C) 2007 Novell
   8 * Copyright (C) 2007 Intel
   9 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
  10 *
  11 * Authors:
  12 *   Dor Laor <dor.laor@qumranet.com>
  13 *   Gregory Haskins <ghaskins@novell.com>
  14 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
  15 *
  16 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
  17 */
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/kvm_host.h>
  21#include <linux/kvm.h>
  22#include <linux/mm.h>
  23#include <linux/highmem.h>
  24#include <linux/smp.h>
  25#include <linux/hrtimer.h>
  26#include <linux/io.h>
  27#include <linux/export.h>
  28#include <linux/math64.h>
  29#include <linux/slab.h>
  30#include <asm/processor.h>
  31#include <asm/mce.h>
  32#include <asm/msr.h>
  33#include <asm/page.h>
  34#include <asm/current.h>
  35#include <asm/apicdef.h>
  36#include <asm/delay.h>
  37#include <linux/atomic.h>
  38#include <linux/jump_label.h>
  39#include "kvm_cache_regs.h"
  40#include "irq.h"
  41#include "ioapic.h"
  42#include "trace.h"
  43#include "x86.h"
  44#include "xen.h"
  45#include "cpuid.h"
  46#include "hyperv.h"
  47#include "smm.h"
  48
  49#ifndef CONFIG_X86_64
  50#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
  51#else
  52#define mod_64(x, y) ((x) % (y))
  53#endif
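/*
 * For illustration: both definitions agree, e.g. mod_64(10, 3) is
 * 10 - 3 * div64_u64(10, 3) = 1 on 32-bit builds and 10 % 3 = 1 on 64-bit
 * builds; the open-coded form avoids the 64-bit '%' that 32-bit x86 cannot
 * lower without a libgcc helper.
 */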
  54
   55/* 14 is the version for Xeon and Pentium 8.4.8 */
  56#define APIC_VERSION			0x14UL
  57#define LAPIC_MMIO_LENGTH		(1 << 12)
   58/* the following defines are not in apicdef.h */
  59#define MAX_APIC_VECTOR			256
  60#define APIC_VECTORS_PER_REG		32
  61
  62/*
  63 * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
  64 * tuning.  When enabled, KVM programs the host timer event to fire early, i.e.
  65 * before the deadline expires, to account for the delay between taking the
  66 * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
  67 * the guest, i.e. so that the interrupt arrives in the guest with minimal
  68 * latency relative to the deadline programmed by the guest.
  69 */
  70static bool lapic_timer_advance __read_mostly = true;
  71module_param(lapic_timer_advance, bool, 0444);
  72
  73#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
  74#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
  75#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
  76#define LAPIC_TIMER_ADVANCE_NS_MAX     5000
  77/* step-by-step approximation to mitigate fluctuation */
  78#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
  79static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
  80static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
  81
  82static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
  83{
  84	*((u32 *) (regs + reg_off)) = val;
  85}
  86
  87static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
  88{
  89	__kvm_lapic_set_reg(apic->regs, reg_off, val);
  90}
  91
  92static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
  93{
  94	BUILD_BUG_ON(reg != APIC_ICR);
  95	return *((u64 *) (regs + reg));
  96}
  97
  98static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
  99{
 100	return __kvm_lapic_get_reg64(apic->regs, reg);
 101}
 102
 103static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
 104{
 105	BUILD_BUG_ON(reg != APIC_ICR);
 106	*((u64 *) (regs + reg)) = val;
 107}
 108
 109static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
 110						int reg, u64 val)
 111{
 112	__kvm_lapic_set_reg64(apic->regs, reg, val);
 113}
 114
 115static inline int apic_test_vector(int vec, void *bitmap)
 116{
 117	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 118}
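/*
 * Worked example, assuming lapic.h's VEC_POS(v) = v & 31 and
 * REG_POS(v) = (v >> 5) << 4: for vec = 49 (0x31), REG_POS selects the
 * second 32-bit bank (banks sit 16 bytes apart in the register page) and
 * VEC_POS = 17 is the bit within that bank.
 */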
 119
 120bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
 121{
 122	struct kvm_lapic *apic = vcpu->arch.apic;
 123
 124	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
 125		apic_test_vector(vector, apic->regs + APIC_IRR);
 126}
 127
 128static inline int __apic_test_and_set_vector(int vec, void *bitmap)
 129{
 130	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 131}
 132
 133static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
 134{
 135	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 136}
 137
 138__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
 139EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
 140
 141__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
 142__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
 143
 144static inline int apic_enabled(struct kvm_lapic *apic)
 145{
  146	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
 147}
 148
 149#define LVT_MASK	\
 150	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
 151
 152#define LINT_MASK	\
 153	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
 154	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 155
 156static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 157{
 158	return apic->vcpu->vcpu_id;
 159}
 160
 161static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
 162{
 163	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
 164		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
 165}
 166
 167bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
 168{
 169	return kvm_x86_ops.set_hv_timer
 170	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
 171		    kvm_can_post_timer_interrupt(vcpu));
 172}
 
 173
 174static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
 175{
 176	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
 177}
 178
 179static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
 180{
 181	return ((id >> 4) << 16) | (1 << (id & 0xf));
 182}
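/*
 * For illustration: id = 0x23 yields cluster 0x2 in bits 31:16 and logical
 * bit (1 << 3) in bits 15:0, i.e. LDR = 0x00020008, matching the
 * architectural x2APIC derivation of 16 logical IDs per cluster.
 */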
 183
 184static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
 185		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
 186	switch (map->logical_mode) {
 187	case KVM_APIC_MODE_SW_DISABLED:
 188		/* Arbitrarily use the flat map so that @cluster isn't NULL. */
 189		*cluster = map->xapic_flat_map;
 190		*mask = 0;
 191		return true;
 192	case KVM_APIC_MODE_X2APIC: {
 193		u32 offset = (dest_id >> 16) * 16;
 194		u32 max_apic_id = map->max_apic_id;
 195
 196		if (offset <= max_apic_id) {
 197			u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 198
 199			offset = array_index_nospec(offset, map->max_apic_id + 1);
 200			*cluster = &map->phys_map[offset];
 201			*mask = dest_id & (0xffff >> (16 - cluster_size));
 202		} else {
 203			*mask = 0;
 204		}
 205
 206		return true;
 207		}
 208	case KVM_APIC_MODE_XAPIC_FLAT:
 209		*cluster = map->xapic_flat_map;
 210		*mask = dest_id & 0xff;
 211		return true;
 212	case KVM_APIC_MODE_XAPIC_CLUSTER:
 213		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
 214		*mask = dest_id & 0xf;
 215		return true;
 216	case KVM_APIC_MODE_MAP_DISABLED:
 217		return false;
 218	default:
 219		WARN_ON_ONCE(1);
 220		return false;
 221	}
 222}
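/*
 * x2APIC example for the lookup above: dest_id = 0x00020003 selects
 * cluster 2, so offset = 32; assuming max_apic_id >= 47, cluster_size is
 * 16, *cluster points at phys_map[32], and *mask = 0x3, i.e. the vCPUs
 * with x2APIC IDs 32 and 33.
 */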
 223
 224static void kvm_apic_map_free(struct rcu_head *rcu)
 225{
 226	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
 227
 228	kvfree(map);
 229}
 230
 231static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
 232				    struct kvm_vcpu *vcpu,
 233				    bool *xapic_id_mismatch)
 234{
 235	struct kvm_lapic *apic = vcpu->arch.apic;
 236	u32 x2apic_id = kvm_x2apic_id(apic);
 237	u32 xapic_id = kvm_xapic_id(apic);
 238	u32 physical_id;
 239
 240	/*
 241	 * For simplicity, KVM always allocates enough space for all possible
 242	 * xAPIC IDs.  Yell, but don't kill the VM, as KVM can continue on
 243	 * without the optimized map.
 244	 */
 245	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
 246		return -EINVAL;
 247
 248	/*
 249	 * Bail if a vCPU was added and/or enabled its APIC between allocating
 250	 * the map and doing the actual calculations for the map.  Note, KVM
 251	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
 252	 * the compiler decides to reload x2apic_id after this check.
 253	 */
 254	if (x2apic_id > new->max_apic_id)
 255		return -E2BIG;
 256
 257	/*
 258	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
 259	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
  260	 * 32-bit value.  Any unwanted aliasing that results from the
  261	 * truncation will be detected below.
 262	 */
 263	if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
 264		*xapic_id_mismatch = true;
 265
 266	/*
  267	 * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
 268	 * Allow sending events to vCPUs by their x2APIC ID even if the target
 269	 * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
 270	 * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
 271	 * and collide).
 272	 *
 273	 * Honor the architectural (and KVM's non-optimized) behavior if
 274	 * userspace has not enabled 32-bit x2APIC IDs.  Each APIC is supposed
 275	 * to process messages independently.  If multiple vCPUs have the same
 276	 * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
 277	 * manually modified its xAPIC IDs, events targeting that ID are
 278	 * supposed to be recognized by all vCPUs with said ID.
 279	 */
 280	if (vcpu->kvm->arch.x2apic_format) {
 281		/* See also kvm_apic_match_physical_addr(). */
 282		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
 283			new->phys_map[x2apic_id] = apic;
 284
 285		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
 286			new->phys_map[xapic_id] = apic;
 287	} else {
 288		/*
 289		 * Disable the optimized map if the physical APIC ID is already
 290		 * mapped, i.e. is aliased to multiple vCPUs.  The optimized
 291		 * map requires a strict 1:1 mapping between IDs and vCPUs.
 292		 */
 293		if (apic_x2apic_mode(apic))
 294			physical_id = x2apic_id;
 295		else
 296			physical_id = xapic_id;
 297
 298		if (new->phys_map[physical_id])
 299			return -EINVAL;
 300
 301		new->phys_map[physical_id] = apic;
 302	}
 303
 304	return 0;
 305}
 306
 307static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
 308					struct kvm_vcpu *vcpu)
 309{
 310	struct kvm_lapic *apic = vcpu->arch.apic;
 311	enum kvm_apic_logical_mode logical_mode;
 312	struct kvm_lapic **cluster;
 313	u16 mask;
 314	u32 ldr;
 315
 316	if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
 317		return;
 318
 319	if (!kvm_apic_sw_enabled(apic))
 320		return;
 321
 322	ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 323	if (!ldr)
 324		return;
 325
 326	if (apic_x2apic_mode(apic)) {
 327		logical_mode = KVM_APIC_MODE_X2APIC;
 328	} else {
 329		ldr = GET_APIC_LOGICAL_ID(ldr);
 330		if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
 331			logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
 332		else
 333			logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
 334	}
 335
 336	/*
 337	 * To optimize logical mode delivery, all software-enabled APICs must
 338	 * be configured for the same mode.
 339	 */
 340	if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
 341		new->logical_mode = logical_mode;
 342	} else if (new->logical_mode != logical_mode) {
 343		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
 344		return;
 345	}
 346
 347	/*
 348	 * In x2APIC mode, the LDR is read-only and derived directly from the
 349	 * x2APIC ID, thus is guaranteed to be addressable.  KVM reuses
 350	 * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
  351	 * reversing the LDR calculation to get the cluster of APICs, i.e. no
 352	 * additional work is required.
 353	 */
 354	if (apic_x2apic_mode(apic))
 355		return;
 356
 357	if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
 358							&cluster, &mask))) {
 359		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
 360		return;
 361	}
 362
 363	if (!mask)
 364		return;
 365
 366	ldr = ffs(mask) - 1;
 367	if (!is_power_of_2(mask) || cluster[ldr])
 368		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
 369	else
 370		cluster[ldr] = apic;
 371}
 372
 373/*
 374 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 375 *
 376 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
  377 * kvm->arch.apic_map_lock held.
 378 */
 379enum {
 380	CLEAN,
 381	UPDATE_IN_PROGRESS,
 382	DIRTY
 383};
 384
 385static void kvm_recalculate_apic_map(struct kvm *kvm)
 386{
 387	struct kvm_apic_map *new, *old = NULL;
 388	struct kvm_vcpu *vcpu;
 389	unsigned long i;
 390	u32 max_id = 255; /* enough space for any xAPIC ID */
 391	bool xapic_id_mismatch;
 392	int r;
 393
 394	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
 395	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
 396		return;
 397
 398	WARN_ONCE(!irqchip_in_kernel(kvm),
 399		  "Dirty APIC map without an in-kernel local APIC");
 400
 401	mutex_lock(&kvm->arch.apic_map_lock);
 402
 403retry:
 404	/*
 405	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
  406	 * or the APIC registers (if dirty).  Note, on retry the map may not
  407	 * yet have been marked dirty by whatever task changed a vCPU's x2APIC
 408	 * ID, i.e. the map may still show up as in-progress.  In that case
 409	 * this task still needs to retry and complete its calculation.
 410	 */
 411	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
 412				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
 413		/* Someone else has updated the map. */
 414		mutex_unlock(&kvm->arch.apic_map_lock);
 415		return;
 416	}
 417
 418	/*
 419	 * Reset the mismatch flag between attempts so that KVM does the right
 420	 * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
 421	 * keep max_id strictly increasing.  Disallowing max_id from shrinking
 422	 * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
 423	 * with the highest x2APIC ID is toggling its APIC on and off.
 424	 */
 425	xapic_id_mismatch = false;
 426
 427	kvm_for_each_vcpu(i, vcpu, kvm)
 428		if (kvm_apic_present(vcpu))
 429			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
 430
 431	new = kvzalloc(sizeof(struct kvm_apic_map) +
 432	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
 433			   GFP_KERNEL_ACCOUNT);
 434
 435	if (!new)
 436		goto out;
 437
 438	new->max_apic_id = max_id;
 439	new->logical_mode = KVM_APIC_MODE_SW_DISABLED;
 440
 441	kvm_for_each_vcpu(i, vcpu, kvm) {
 442		if (!kvm_apic_present(vcpu))
 443			continue;
 444
 445		r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
 446		if (r) {
 447			kvfree(new);
 448			new = NULL;
 449			if (r == -E2BIG) {
 450				cond_resched();
 451				goto retry;
 452			}
 453
 454			goto out;
 455		}
 456
 457		kvm_recalculate_logical_map(new, vcpu);
 458	}
 459out:
 460	/*
 461	 * The optimized map is effectively KVM's internal version of APICv,
 462	 * and all unwanted aliasing that results in disabling the optimized
 463	 * map also applies to APICv.
 464	 */
 465	if (!new)
 466		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
 467	else
 468		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
 469
 470	if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
 471		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
 472	else
 473		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
 474
 475	if (xapic_id_mismatch)
 476		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
 477	else
 478		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
 479
 480	old = rcu_dereference_protected(kvm->arch.apic_map,
 481			lockdep_is_held(&kvm->arch.apic_map_lock));
 482	rcu_assign_pointer(kvm->arch.apic_map, new);
 483	/*
  484	 * Write kvm->arch.apic_map before clearing kvm->arch.apic_map_dirty.
 485	 * If another update has come in, leave it DIRTY.
 486	 */
 487	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
 488			       UPDATE_IN_PROGRESS, CLEAN);
 489	mutex_unlock(&kvm->arch.apic_map_lock);
 490
 491	if (old)
 492		call_rcu(&old->rcu, kvm_apic_map_free);
 493
 494	kvm_make_scan_ioapic_request(kvm);
 495}
 496
 497static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 498{
 499	bool enabled = val & APIC_SPIV_APIC_ENABLED;
 500
 501	kvm_lapic_set_reg(apic, APIC_SPIV, val);
 502
 503	if (enabled != apic->sw_enabled) {
 504		apic->sw_enabled = enabled;
 505		if (enabled)
 506			static_branch_slow_dec_deferred(&apic_sw_disabled);
 507		else
 508			static_branch_inc(&apic_sw_disabled.key);
 509
 510		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 511	}
 512
 513	/* Check if there are APF page ready requests pending */
 514	if (enabled) {
 515		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
 516		kvm_xen_sw_enable_lapic(apic->vcpu);
 517	}
 518}
 519
 520static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 521{
 522	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
 523	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 524}
 525
 526static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 527{
 528	kvm_lapic_set_reg(apic, APIC_LDR, id);
 529	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 530}
 531
 532static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
 533{
 534	kvm_lapic_set_reg(apic, APIC_DFR, val);
 535	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 536}
 537
 538static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 539{
 540	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 541
 542	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
 543
 544	kvm_lapic_set_reg(apic, APIC_ID, id);
 545	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
 546	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 547}
 548
 549static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
 550{
 551	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
 552}
 553
 554static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
 555{
 556	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
 557}
 558
 559static inline int apic_lvtt_period(struct kvm_lapic *apic)
 560{
 561	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
 562}
 563
 564static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
 565{
 566	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
 567}
 568
 569static inline int apic_lvt_nmi_mode(u32 lvt_val)
 570{
 571	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
 572}
 573
 574static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
 575{
 576	return apic->nr_lvt_entries > lvt_index;
 577}
 578
 579static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
 580{
 581	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
 582}
 583
 584void kvm_apic_set_version(struct kvm_vcpu *vcpu)
 585{
 586	struct kvm_lapic *apic = vcpu->arch.apic;
 587	u32 v = 0;
 588
 589	if (!lapic_in_kernel(vcpu))
 590		return;
 591
 592	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
 593
 594	/*
  595	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
  596	 * implementation), which doesn't have an EOI register.  Some buggy
  597	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast in
  598	 * the LAPIC without checking the IOAPIC version first, so level-
  599	 * triggered interrupts never get EOIed in the IOAPIC.
 600	 */
 601	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
 602	    !ioapic_in_kernel(vcpu->kvm))
 603		v |= APIC_LVR_DIRECTED_EOI;
 604	kvm_lapic_set_reg(apic, APIC_LVR, v);
 605}
 606
 607void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
 608{
 609	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
 610	struct kvm_lapic *apic = vcpu->arch.apic;
 611	int i;
 612
 613	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
 614		return;
 615
 616	/* Initialize/mask any "new" LVT entries. */
 617	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
 618		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
 619
 620	apic->nr_lvt_entries = nr_lvt_entries;
 621
 622	/* The number of LVT entries is reflected in the version register. */
 623	kvm_apic_set_version(vcpu);
 624}
 625
 626static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
 627	[LVT_TIMER] = LVT_MASK,      /* timer mode mask added at runtime */
 628	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
 629	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
 630	[LVT_LINT0] = LINT_MASK,
 631	[LVT_LINT1] = LINT_MASK,
 632	[LVT_ERROR] = LVT_MASK,
 633	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
 634};
 635
 636static int find_highest_vector(void *bitmap)
 637{
 638	int vec;
 639	u32 *reg;
 640
 641	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
 642	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
 643		reg = bitmap + REG_POS(vec);
 644		if (*reg)
 645			return __fls(*reg) + vec;
 646	}
 647
 648	return -1;
 649}
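/*
 * Example: if only vector 49 (0x31) is set, the scan walks the banks from
 * vector 224 downward; the bank covering vectors 32-63 holds 0x00020000,
 * so the result is __fls(0x00020000) + 32 = 17 + 32 = 49.
 */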
 650
 651static u8 count_vectors(void *bitmap)
 652{
 653	int vec;
 654	u32 *reg;
 655	u8 count = 0;
 656
 657	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
 658		reg = bitmap + REG_POS(vec);
 659		count += hweight32(*reg);
 660	}
 661
 662	return count;
 663}
 664
 665bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
 666{
 667	u32 i, vec;
 668	u32 pir_val, irr_val, prev_irr_val;
 669	int max_updated_irr;
 670
 671	max_updated_irr = -1;
 672	*max_irr = -1;
 673
 674	for (i = vec = 0; i <= 7; i++, vec += 32) {
 675		u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);
 676
 677		irr_val = *p_irr;
 678		pir_val = READ_ONCE(pir[i]);
 679
 680		if (pir_val) {
 681			pir_val = xchg(&pir[i], 0);
 682
 683			prev_irr_val = irr_val;
 684			do {
 685				irr_val = prev_irr_val | pir_val;
 686			} while (prev_irr_val != irr_val &&
 687				 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));
 688
 689			if (prev_irr_val != irr_val)
 690				max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
 691		}
 692		if (irr_val)
 693			*max_irr = __fls(irr_val) + vec;
 694	}
 695
 696	return ((max_updated_irr != -1) &&
 697		(max_updated_irr == *max_irr));
 698}
 699EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
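/*
 * Sketch of the merge above with a single posted vector: pir[1] = 1 << 17
 * (vector 49) and a clear IRR bank give irr_val = 0x00020000, so
 * max_updated_irr = *max_irr = 49 and the function returns true, telling
 * the caller that the new highest pending vector is one it just moved over
 * from the PIR.
 */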
 700
 701bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
 702{
 703	struct kvm_lapic *apic = vcpu->arch.apic;
 704	bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
 705
 706	if (unlikely(!apic->apicv_active && irr_updated))
 707		apic->irr_pending = true;
 708	return irr_updated;
 709}
 710EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 711
 712static inline int apic_search_irr(struct kvm_lapic *apic)
 713{
 714	return find_highest_vector(apic->regs + APIC_IRR);
 715}
 716
 717static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 718{
 719	int result;
 720
 721	/*
  722	 * Note that irr_pending is just a hint. It will always be
  723	 * true when virtual interrupt delivery is enabled.
 724	 */
 725	if (!apic->irr_pending)
 726		return -1;
 727
 728	result = apic_search_irr(apic);
 729	ASSERT(result == -1 || result >= 16);
 730
 731	return result;
 732}
 733
 734static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 735{
 736	if (unlikely(apic->apicv_active)) {
 737		/* need to update RVI */
 738		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
 739		kvm_x86_call(hwapic_irr_update)(apic->vcpu,
 740						apic_find_highest_irr(apic));
 741	} else {
 742		apic->irr_pending = false;
 743		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
 744		if (apic_search_irr(apic) != -1)
 745			apic->irr_pending = true;
 746	}
 747}
 748
 749void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
 750{
 751	apic_clear_irr(vec, vcpu->arch.apic);
 752}
 753EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
 754
 755static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 756{
 757	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
 758		return;
 759
 760	/*
 761	 * With APIC virtualization enabled, all caching is disabled
 762	 * because the processor can modify ISR under the hood.  Instead
 763	 * just set SVI.
 764	 */
 765	if (unlikely(apic->apicv_active))
 766		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
 767	else {
 768		++apic->isr_count;
 769		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
 770		/*
  771		 * The ISR (in-service register) bit is set when an interrupt is
  772		 * injected.  Only the highest-priority vector is injected, so the
  773		 * most recently set bit matches the highest bit in the ISR.
 774		 */
 775		apic->highest_isr_cache = vec;
 776	}
 777}
 778
 779static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 780{
 781	int result;
 782
 783	/*
 784	 * Note that isr_count is always 1, and highest_isr_cache
 785	 * is always -1, with APIC virtualization enabled.
 786	 */
 787	if (!apic->isr_count)
 788		return -1;
 789	if (likely(apic->highest_isr_cache != -1))
 790		return apic->highest_isr_cache;
 791
 792	result = find_highest_vector(apic->regs + APIC_ISR);
 793	ASSERT(result == -1 || result >= 16);
 794
 795	return result;
 796}
 797
 798static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 799{
 800	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
 801		return;
 802
 803	/*
  804	 * We do get here with APIC virtualization enabled if the guest
 805	 * uses the Hyper-V APIC enlightenment.  In this case we may need
 806	 * to trigger a new interrupt delivery by writing the SVI field;
 807	 * on the other hand isr_count and highest_isr_cache are unused
 808	 * and must be left alone.
 809	 */
 810	if (unlikely(apic->apicv_active))
 811		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
 812	else {
 813		--apic->isr_count;
 814		BUG_ON(apic->isr_count < 0);
 815		apic->highest_isr_cache = -1;
 816	}
 817}
 818
 819void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
 820{
 821	struct kvm_lapic *apic = vcpu->arch.apic;
 822
 823	if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
 824		return;
 825
 826	kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
 827}
 828EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);
 829
 830int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 831{
  832	/* This may race with the setting of irr in __apic_accept_irq() and
  833	 * the value returned may be stale, but kvm_vcpu_kick() in
  834	 * __apic_accept_irq() will cause an immediate vmexit and the value
  835	 * will be recalculated on the next vmentry.
  836	 */
 837	return apic_find_highest_irr(vcpu->arch.apic);
 838}
 839EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 840
 841static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 842			     int vector, int level, int trig_mode,
 843			     struct dest_map *dest_map);
 844
 845int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 846		     struct dest_map *dest_map)
 847{
 848	struct kvm_lapic *apic = vcpu->arch.apic;
 849
 850	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
 851			irq->level, irq->trig_mode, dest_map);
 852}
 853
 854static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
 855			 struct kvm_lapic_irq *irq, u32 min)
 856{
 857	int i, count = 0;
 858	struct kvm_vcpu *vcpu;
 859
 860	if (min > map->max_apic_id)
 861		return 0;
 862
 863	for_each_set_bit(i, ipi_bitmap,
 864		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
 865		if (map->phys_map[min + i]) {
 866			vcpu = map->phys_map[min + i]->vcpu;
 867			count += kvm_apic_set_irq(vcpu, irq, NULL);
 868		}
 869	}
 870
 871	return count;
 872}
 873
 874int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 875		    unsigned long ipi_bitmap_high, u32 min,
 876		    unsigned long icr, int op_64_bit)
 877{
 878	struct kvm_apic_map *map;
 879	struct kvm_lapic_irq irq = {0};
 880	int cluster_size = op_64_bit ? 64 : 32;
 881	int count;
 882
 883	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
 884		return -KVM_EINVAL;
 885
 886	irq.vector = icr & APIC_VECTOR_MASK;
 887	irq.delivery_mode = icr & APIC_MODE_MASK;
 888	irq.level = (icr & APIC_INT_ASSERT) != 0;
 889	irq.trig_mode = icr & APIC_INT_LEVELTRIG;
 890
 891	rcu_read_lock();
 892	map = rcu_dereference(kvm->arch.apic_map);
 893
 894	count = -EOPNOTSUPP;
 895	if (likely(map)) {
 896		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
 897		min += cluster_size;
 898		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
 899	}
 900
 901	rcu_read_unlock();
 902	return count;
 903}
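/*
 * Example: with op_64_bit and min = 0, bit 3 of ipi_bitmap_low targets the
 * vCPU whose APIC ID is 3, while bit 3 of ipi_bitmap_high targets APIC ID
 * 64 + 3 = 67, because the second bitmap starts at min + cluster_size.
 */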
 904
 905static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 906{
 907
 908	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
 909				      sizeof(val));
 910}
 911
 912static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 913{
 914
 915	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
 916				      sizeof(*val));
 917}
 918
 919static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
 920{
 921	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
 922}
 923
 924static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
 925{
 926	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
 927		return;
 928
 929	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 930}
 931
 932static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
 933{
 934	u8 val;
 935
 936	if (pv_eoi_get_user(vcpu, &val) < 0)
 937		return false;
 938
 939	val &= KVM_PV_EOI_ENABLED;
 940
 941	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
 942		return false;
 943
 944	/*
 945	 * Clear pending bit in any case: it will be set again on vmentry.
  946	 * While this might not be ideal from a performance point of view,
  947	 * it makes sure PV EOI is only enabled when we know it's safe.
 948	 */
 949	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 950
 951	return val;
 952}
 953
 954static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 955{
 956	int highest_irr;
 957	if (kvm_x86_ops.sync_pir_to_irr)
 958		highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
 959	else
 960		highest_irr = apic_find_highest_irr(apic);
 961	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
 962		return -1;
 963	return highest_irr;
 964}
 965
 966static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
 967{
 968	u32 tpr, isrv, ppr, old_ppr;
 969	int isr;
 970
 971	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
 972	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
 973	isr = apic_find_highest_isr(apic);
 974	isrv = (isr != -1) ? isr : 0;
 975
 976	if ((tpr & 0xf0) >= (isrv & 0xf0))
 977		ppr = tpr & 0xff;
 978	else
 979		ppr = isrv & 0xf0;
 980
 981	*new_ppr = ppr;
 982	if (old_ppr != ppr)
 983		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
 984
 985	return ppr < old_ppr;
 986}
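/*
 * Worked example: with TPR = 0x35 and highest in-service vector 0x51, the
 * class comparison 0x30 < 0x50 makes PPR = 0x50, so pending vectors up to
 * 0x5f stay blocked until an EOI or a TPR change lowers the PPR.
 */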
 987
 988static void apic_update_ppr(struct kvm_lapic *apic)
 989{
 990	u32 ppr;
 991
 992	if (__apic_update_ppr(apic, &ppr) &&
 993	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
 994		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 995}
 996
 997void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
 998{
 999	apic_update_ppr(vcpu->arch.apic);
1000}
1001EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
1002
1003static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
1004{
1005	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
1006	apic_update_ppr(apic);
1007}
1008
1009static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
1010{
1011	return mda == (apic_x2apic_mode(apic) ?
1012			X2APIC_BROADCAST : APIC_BROADCAST);
1013}
1014
1015static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
1016{
1017	if (kvm_apic_broadcast(apic, mda))
1018		return true;
1019
1020	/*
1021	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
1022	 * were in x2APIC mode if the target APIC ID can't be encoded as an
1023	 * xAPIC ID.  This allows unique addressing of hotplugged vCPUs (which
1024	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
1025	 * mode.  Match the x2APIC ID if and only if the target APIC ID can't
1026	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
1027	 * changed its (addressable) xAPIC ID (which is writable).
1028	 */
1029	if (apic_x2apic_mode(apic) || mda > 0xff)
1030		return mda == kvm_x2apic_id(apic);
1031
1032	return mda == kvm_xapic_id(apic);
1033}
1034
1035static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
1036{
1037	u32 logical_id;
1038
1039	if (kvm_apic_broadcast(apic, mda))
1040		return true;
1041
1042	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
1043
1044	if (apic_x2apic_mode(apic))
1045		return ((logical_id >> 16) == (mda >> 16))
1046		       && (logical_id & mda & 0xffff) != 0;
1047
1048	logical_id = GET_APIC_LOGICAL_ID(logical_id);
1049
1050	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
1051	case APIC_DFR_FLAT:
1052		return (logical_id & mda) != 0;
1053	case APIC_DFR_CLUSTER:
1054		return ((logical_id >> 4) == (mda >> 4))
1055		       && (logical_id & mda & 0xf) != 0;
1056	default:
1057		return false;
1058	}
1059}
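/*
 * Examples for the match above: in x2APIC mode, LDR = 0x00020008 matches
 * MDA = 0x0002000c (same cluster 2, common bit 3).  In xAPIC cluster mode,
 * logical ID 0x12 matches MDA 0x16 (cluster 1, common bit 2) but not
 * MDA 0x21, which names cluster 2.
 */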
1060
1061/* The KVM local APIC implementation has two quirks:
1062 *
1063 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
1064 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
1065 *    KVM doesn't do that aliasing.
1066 *
1067 *  - in-kernel IOAPIC messages have to be delivered directly to
1068 *    x2APIC, because the kernel does not support interrupt remapping.
1069 *    In order to support broadcast without interrupt remapping, x2APIC
1070 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
1071 *    to X2APIC_BROADCAST.
1072 *
1073 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
1074 * important when userspace wants to use x2APIC-format MSIs, because
1075 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
1076 */
1077static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
1078		struct kvm_lapic *source, struct kvm_lapic *target)
1079{
1080	bool ipi = source != NULL;
1081
1082	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
1083	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
1084		return X2APIC_BROADCAST;
1085
1086	return dest_id;
1087}
1088
1089bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
1090			   int shorthand, unsigned int dest, int dest_mode)
1091{
1092	struct kvm_lapic *target = vcpu->arch.apic;
1093	u32 mda = kvm_apic_mda(vcpu, dest, source, target);
1094
1095	ASSERT(target);
1096	switch (shorthand) {
1097	case APIC_DEST_NOSHORT:
1098		if (dest_mode == APIC_DEST_PHYSICAL)
1099			return kvm_apic_match_physical_addr(target, mda);
1100		else
1101			return kvm_apic_match_logical_addr(target, mda);
1102	case APIC_DEST_SELF:
1103		return target == source;
1104	case APIC_DEST_ALLINC:
1105		return true;
1106	case APIC_DEST_ALLBUT:
1107		return target != source;
1108	default:
1109		return false;
1110	}
1111}
1112EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
1113
1114int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
1115		       const unsigned long *bitmap, u32 bitmap_size)
1116{
1117	u32 mod;
1118	int i, idx = -1;
1119
1120	mod = vector % dest_vcpus;
1121
1122	for (i = 0; i <= mod; i++) {
1123		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
1124		BUG_ON(idx == bitmap_size);
1125	}
1126
1127	return idx;
1128}
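/*
 * Example of the hashing walk: vector 86 with 3 destinations gives
 * mod = 86 % 3 = 2, so the loop returns the third set bit; for a bitmap
 * with bits {1, 4, 9} set, the chosen index is 9.
 */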
1129
1130static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
1131{
1132	if (!kvm->arch.disabled_lapic_found) {
1133		kvm->arch.disabled_lapic_found = true;
1134		pr_info("Disabled LAPIC found during irq injection\n");
1135	}
1136}
1137
1138static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
1139		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
1140{
1141	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
1142		if ((irq->dest_id == APIC_BROADCAST &&
1143		     map->logical_mode != KVM_APIC_MODE_X2APIC))
1144			return true;
1145		if (irq->dest_id == X2APIC_BROADCAST)
1146			return true;
1147	} else {
1148		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
1149		if (irq->dest_id == (x2apic_ipi ?
1150		                     X2APIC_BROADCAST : APIC_BROADCAST))
1151			return true;
1152	}
1153
1154	return false;
1155}
1156
1157/* Return true if the interrupt can be handled by using *bitmap as index mask
1158 * for valid destinations in *dst array.
1159 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
1160 * Note: we may have zero kvm_lapic destinations when we return true, which
1161 * means that the interrupt should be dropped.  In this case, *bitmap would be
1162 * zero and *dst undefined.
1163 */
1164static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
1165		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
1166		struct kvm_apic_map *map, struct kvm_lapic ***dst,
1167		unsigned long *bitmap)
1168{
1169	int i, lowest;
1170
1171	if (irq->shorthand == APIC_DEST_SELF && src) {
1172		*dst = src;
1173		*bitmap = 1;
1174		return true;
1175	} else if (irq->shorthand)
1176		return false;
1177
1178	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
1179		return false;
1180
1181	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
1182		if (irq->dest_id > map->max_apic_id) {
1183			*bitmap = 0;
1184		} else {
1185			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
1186			*dst = &map->phys_map[dest_id];
1187			*bitmap = 1;
1188		}
1189		return true;
1190	}
1191
1192	*bitmap = 0;
1193	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
1194				(u16 *)bitmap))
1195		return false;
1196
1197	if (!kvm_lowest_prio_delivery(irq))
1198		return true;
1199
1200	if (!kvm_vector_hashing_enabled()) {
1201		lowest = -1;
1202		for_each_set_bit(i, bitmap, 16) {
1203			if (!(*dst)[i])
1204				continue;
1205			if (lowest < 0)
1206				lowest = i;
1207			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
1208						(*dst)[lowest]->vcpu) < 0)
1209				lowest = i;
1210		}
1211	} else {
1212		if (!*bitmap)
1213			return true;
1214
1215		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
1216				bitmap, 16);
1217
1218		if (!(*dst)[lowest]) {
1219			kvm_apic_disabled_lapic_found(kvm);
1220			*bitmap = 0;
1221			return true;
1222		}
1223	}
1224
1225	*bitmap = (lowest >= 0) ? 1 << lowest : 0;
1226
1227	return true;
1228}
1229
1230bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
1231		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
1232{
1233	struct kvm_apic_map *map;
1234	unsigned long bitmap;
1235	struct kvm_lapic **dst = NULL;
1236	int i;
1237	bool ret;
1238
1239	*r = -1;
1240
1241	if (irq->shorthand == APIC_DEST_SELF) {
1242		if (KVM_BUG_ON(!src, kvm)) {
1243			*r = 0;
1244			return true;
1245		}
1246		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
1247		return true;
1248	}
1249
1250	rcu_read_lock();
1251	map = rcu_dereference(kvm->arch.apic_map);
1252
1253	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1254	if (ret) {
1255		*r = 0;
1256		for_each_set_bit(i, &bitmap, 16) {
1257			if (!dst[i])
1258				continue;
1259			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1260		}
1261	}
1262
1263	rcu_read_unlock();
1264	return ret;
1265}
1266
1267/*
1268 * This routine tries to handle interrupts in posted mode, here is how
1269 * it deals with different cases:
1270 * - For single-destination interrupts, handle it in posted mode
1271 * - Else if vector hashing is enabled and it is a lowest-priority
1272 *   interrupt, handle it in posted mode and use the following mechanism
1273 *   to find the destination vCPU.
1274 *	1. For lowest-priority interrupts, store all the possible
1275 *	   destination vCPUs in an array.
1276 *	2. Use "guest vector % max number of destination vCPUs" to find
1277 *	   the right destination vCPU in the array for the lowest-priority
1278 *	   interrupt.
1279 * - Otherwise, use remapped mode to inject the interrupt.
1280 */
1281bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
1282			struct kvm_vcpu **dest_vcpu)
1283{
1284	struct kvm_apic_map *map;
1285	unsigned long bitmap;
1286	struct kvm_lapic **dst = NULL;
1287	bool ret = false;
1288
1289	if (irq->shorthand)
1290		return false;
1291
1292	rcu_read_lock();
1293	map = rcu_dereference(kvm->arch.apic_map);
1294
1295	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1296			hweight16(bitmap) == 1) {
1297		unsigned long i = find_first_bit(&bitmap, 16);
1298
1299		if (dst[i]) {
1300			*dest_vcpu = dst[i]->vcpu;
1301			ret = true;
1302		}
1303	}
1304
1305	rcu_read_unlock();
1306	return ret;
1307}
1308
1309/*
1310 * Add a pending IRQ into lapic.
1311 * Return 1 if successfully added and 0 if discarded.
1312 */
1313static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1314			     int vector, int level, int trig_mode,
1315			     struct dest_map *dest_map)
1316{
1317	int result = 0;
1318	struct kvm_vcpu *vcpu = apic->vcpu;
1319
1320	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1321				  trig_mode, vector);
1322	switch (delivery_mode) {
1323	case APIC_DM_LOWEST:
1324		vcpu->arch.apic_arb_prio++;
1325		fallthrough;
1326	case APIC_DM_FIXED:
1327		if (unlikely(trig_mode && !level))
1328			break;
1329
1330		/* FIXME add logic for vcpu on reset */
1331		if (unlikely(!apic_enabled(apic)))
1332			break;
1333
1334		result = 1;
1335
1336		if (dest_map) {
1337			__set_bit(vcpu->vcpu_id, dest_map->map);
1338			dest_map->vectors[vcpu->vcpu_id] = vector;
1339		}
1340
1341		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1342			if (trig_mode)
1343				kvm_lapic_set_vector(vector,
1344						     apic->regs + APIC_TMR);
1345			else
1346				kvm_lapic_clear_vector(vector,
1347						       apic->regs + APIC_TMR);
1348		}
1349
1350		kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
1351						trig_mode, vector);
1352		break;
1353
1354	case APIC_DM_REMRD:
1355		result = 1;
1356		vcpu->arch.pv.pv_unhalted = 1;
1357		kvm_make_request(KVM_REQ_EVENT, vcpu);
1358		kvm_vcpu_kick(vcpu);
1359		break;
1360
1361	case APIC_DM_SMI:
1362		if (!kvm_inject_smi(vcpu)) {
1363			kvm_vcpu_kick(vcpu);
1364			result = 1;
1365		}
1366		break;
1367
1368	case APIC_DM_NMI:
1369		result = 1;
1370		kvm_inject_nmi(vcpu);
1371		kvm_vcpu_kick(vcpu);
1372		break;
1373
1374	case APIC_DM_INIT:
1375		if (!trig_mode || level) {
1376			result = 1;
1377			/* assumes that there are only KVM_APIC_INIT/SIPI */
1378			apic->pending_events = (1UL << KVM_APIC_INIT);
1379			kvm_make_request(KVM_REQ_EVENT, vcpu);
1380			kvm_vcpu_kick(vcpu);
1381		}
1382		break;
1383
1384	case APIC_DM_STARTUP:
1385		result = 1;
1386		apic->sipi_vector = vector;
1387		/* make sure sipi_vector is visible for the receiver */
1388		smp_wmb();
1389		set_bit(KVM_APIC_SIPI, &apic->pending_events);
1390		kvm_make_request(KVM_REQ_EVENT, vcpu);
1391		kvm_vcpu_kick(vcpu);
1392		break;
1393
1394	case APIC_DM_EXTINT:
1395		/*
1396		 * Should only be called by kvm_apic_local_deliver() with LVT0,
1397		 * before NMI watchdog was enabled. Already handled by
1398		 * kvm_apic_accept_pic_intr().
1399		 */
1400		break;
1401
1402	default:
1403		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1404		       delivery_mode);
1405		break;
1406	}
1407	return result;
1408}
1409
1410/*
 1411 * This routine identifies the mask of destination vCPUs meant to receive
 1412 * an IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to
 1413 * find the destination vCPU array and set the bitmap, or it walks each
 1414 * available vCPU and matches the destination by hand.
1415 */
1416void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
1417			      unsigned long *vcpu_bitmap)
1418{
1419	struct kvm_lapic **dest_vcpu = NULL;
1420	struct kvm_lapic *src = NULL;
1421	struct kvm_apic_map *map;
1422	struct kvm_vcpu *vcpu;
1423	unsigned long bitmap, i;
1424	int vcpu_idx;
1425	bool ret;
1426
1427	rcu_read_lock();
1428	map = rcu_dereference(kvm->arch.apic_map);
1429
1430	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1431					  &bitmap);
1432	if (ret) {
1433		for_each_set_bit(i, &bitmap, 16) {
1434			if (!dest_vcpu[i])
1435				continue;
1436			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1437			__set_bit(vcpu_idx, vcpu_bitmap);
1438		}
1439	} else {
1440		kvm_for_each_vcpu(i, vcpu, kvm) {
1441			if (!kvm_apic_present(vcpu))
1442				continue;
1443			if (!kvm_apic_match_dest(vcpu, NULL,
1444						 irq->shorthand,
1445						 irq->dest_id,
1446						 irq->dest_mode))
1447				continue;
1448			__set_bit(i, vcpu_bitmap);
1449		}
1450	}
1451	rcu_read_unlock();
1452}
1453
1454int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1455{
1456	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1457}
1458
1459static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1460{
1461	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1462}
1463
1464static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1465{
1466	int trigger_mode;
1467
 1468	/* EOI the IOAPIC only if the IOAPIC actually handles the vector. */
1469	if (!kvm_ioapic_handles_vector(apic, vector))
1470		return;
1471
1472	/* Request a KVM exit to inform the userspace IOAPIC. */
1473	if (irqchip_split(apic->vcpu->kvm)) {
1474		apic->vcpu->arch.pending_ioapic_eoi = vector;
1475		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1476		return;
1477	}
1478
1479	if (apic_test_vector(vector, apic->regs + APIC_TMR))
1480		trigger_mode = IOAPIC_LEVEL_TRIG;
1481	else
1482		trigger_mode = IOAPIC_EDGE_TRIG;
1483
1484	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1485}
1486
1487static int apic_set_eoi(struct kvm_lapic *apic)
1488{
1489	int vector = apic_find_highest_isr(apic);
1490
1491	trace_kvm_eoi(apic, vector);
1492
1493	/*
 1494	 * Not every EOI write has a corresponding ISR bit; one example is
 1495	 * when the kernel checks the timer during setup_IO_APIC.
1496	 */
1497	if (vector == -1)
1498		return vector;
1499
1500	apic_clear_isr(vector, apic);
1501	apic_update_ppr(apic);
1502
1503	if (kvm_hv_synic_has_vector(apic->vcpu, vector))
1504		kvm_hv_synic_send_eoi(apic->vcpu, vector);
1505
1506	kvm_ioapic_send_eoi(apic, vector);
1507	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1508	return vector;
1509}
1510
1511/*
 1512 * This interface assumes a trap-like exit, which has already finished
 1513 * the desired side effects, including the vISR and vPPR updates.
1514 */
1515void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1516{
1517	struct kvm_lapic *apic = vcpu->arch.apic;
1518
1519	trace_kvm_eoi(apic, vector);
1520
1521	kvm_ioapic_send_eoi(apic, vector);
1522	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1523}
1524EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1525
1526void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1527{
1528	struct kvm_lapic_irq irq;
1529
1530	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
1531	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
1532
1533	irq.vector = icr_low & APIC_VECTOR_MASK;
1534	irq.delivery_mode = icr_low & APIC_MODE_MASK;
1535	irq.dest_mode = icr_low & APIC_DEST_MASK;
1536	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1537	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1538	irq.shorthand = icr_low & APIC_SHORT_MASK;
1539	irq.msi_redir_hint = false;
1540	if (apic_x2apic_mode(apic))
1541		irq.dest_id = icr_high;
1542	else
1543		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
1544
1545	trace_kvm_apic_ipi(icr_low, irq.dest_id);
1546
1547	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1548}
1549EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
1550
1551static u32 apic_get_tmcct(struct kvm_lapic *apic)
1552{
1553	ktime_t remaining, now;
1554	s64 ns;
1555
1556	ASSERT(apic != NULL);
1557
1558	/* if initial count is 0, current count should also be 0 */
1559	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1560		apic->lapic_timer.period == 0)
1561		return 0;
1562
1563	now = ktime_get();
1564	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1565	if (ktime_to_ns(remaining) < 0)
1566		remaining = 0;
1567
1568	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1569	return div64_u64(ns, (apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1570			      apic->divide_count));
1571}
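/*
 * Illustration, assuming KVM's default 1 ns APIC bus cycle: TMICT = 1000
 * with divide_count = 2 yields a 2000 ns period; if 500 ns remain until
 * target_expiration, TMCCT reads back as 500 / (1 * 2) = 250 ticks.
 */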
1572
1573static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1574{
1575	struct kvm_vcpu *vcpu = apic->vcpu;
1576	struct kvm_run *run = vcpu->run;
1577
1578	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1579	run->tpr_access.rip = kvm_rip_read(vcpu);
1580	run->tpr_access.is_write = write;
1581}
1582
1583static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1584{
1585	if (apic->vcpu->arch.tpr_access_reporting)
1586		__report_tpr_access(apic, write);
1587}
1588
1589static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1590{
1591	u32 val = 0;
1592
1593	if (offset >= LAPIC_MMIO_LENGTH)
1594		return 0;
1595
1596	switch (offset) {
1597	case APIC_ARBPRI:
1598		break;
1599
1600	case APIC_TMCCT:	/* Timer CCR */
1601		if (apic_lvtt_tscdeadline(apic))
1602			return 0;
1603
1604		val = apic_get_tmcct(apic);
1605		break;
1606	case APIC_PROCPRI:
1607		apic_update_ppr(apic);
1608		val = kvm_lapic_get_reg(apic, offset);
1609		break;
1610	case APIC_TASKPRI:
1611		report_tpr_access(apic, false);
1612		fallthrough;
1613	default:
1614		val = kvm_lapic_get_reg(apic, offset);
1615		break;
1616	}
1617
1618	return val;
1619}
1620
1621static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1622{
1623	return container_of(dev, struct kvm_lapic, dev);
1624}
1625
1626#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
1627#define APIC_REGS_MASK(first, count) \
1628	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
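/*
 * Example, using apicdef.h values: APIC_REG_MASK(APIC_SPIV) is bit 15
 * (0xf0 >> 4), and APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) multiplies
 * APIC_REG_MASK(0x200) by 0xff to cover bits 32-39, one bit per 16-byte
 * register in the eight-bank IRR.
 */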
1629
1630u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
1631{
1632	/* Leave bits '0' for reserved and write-only registers. */
1633	u64 valid_reg_mask =
1634		APIC_REG_MASK(APIC_ID) |
1635		APIC_REG_MASK(APIC_LVR) |
1636		APIC_REG_MASK(APIC_TASKPRI) |
1637		APIC_REG_MASK(APIC_PROCPRI) |
1638		APIC_REG_MASK(APIC_LDR) |
1639		APIC_REG_MASK(APIC_SPIV) |
1640		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1641		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1642		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1643		APIC_REG_MASK(APIC_ESR) |
1644		APIC_REG_MASK(APIC_ICR) |
1645		APIC_REG_MASK(APIC_LVTT) |
1646		APIC_REG_MASK(APIC_LVTTHMR) |
1647		APIC_REG_MASK(APIC_LVTPC) |
1648		APIC_REG_MASK(APIC_LVT0) |
1649		APIC_REG_MASK(APIC_LVT1) |
1650		APIC_REG_MASK(APIC_LVTERR) |
1651		APIC_REG_MASK(APIC_TMICT) |
1652		APIC_REG_MASK(APIC_TMCCT) |
1653		APIC_REG_MASK(APIC_TDCR);
1654
1655	if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1656		valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);
1657
1658	/* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
1659	if (!apic_x2apic_mode(apic))
1660		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
1661				  APIC_REG_MASK(APIC_DFR) |
1662				  APIC_REG_MASK(APIC_ICR2);
1663
1664	return valid_reg_mask;
1665}
1666EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
1667
1668static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1669			      void *data)
1670{
1671	unsigned char alignment = offset & 0xf;
1672	u32 result;
1673
1674	/*
1675	 * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
1676	 * x2APIC and needs to be manually handled by the caller.
1677	 */
1678	WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);
1679
1680	if (alignment + len > 4)
1681		return 1;
1682
1683	if (offset > 0x3f0 ||
1684	    !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
1685		return 1;
1686
1687	result = __apic_read(apic, offset & ~0xf);
1688
1689	trace_kvm_apic_read(offset, result);
1690
1691	switch (len) {
1692	case 1:
1693	case 2:
1694	case 4:
1695		memcpy(data, (char *)&result + alignment, len);
1696		break;
1697	default:
1698		printk(KERN_ERR "Local APIC read with len = %x, "
 1699	       "should be 1, 2, or 4 instead\n", len);
1700		break;
1701	}
1702	return 0;
1703}
1704
1705static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1706{
1707	return addr >= apic->base_address &&
1708		addr < apic->base_address + LAPIC_MMIO_LENGTH;
1709}
1710
1711static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1712			   gpa_t address, int len, void *data)
1713{
1714	struct kvm_lapic *apic = to_lapic(this);
1715	u32 offset = address - apic->base_address;
1716
1717	if (!apic_mmio_in_range(apic, address))
1718		return -EOPNOTSUPP;
1719
1720	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1721		if (!kvm_check_has_quirk(vcpu->kvm,
1722					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1723			return -EOPNOTSUPP;
1724
1725		memset(data, 0xff, len);
1726		return 0;
1727	}
1728
1729	kvm_lapic_reg_read(apic, offset, len, data);
1730
1731	return 0;
1732}
1733
1734static void update_divide_count(struct kvm_lapic *apic)
1735{
1736	u32 tmp1, tmp2, tdcr;
1737
1738	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1739	tmp1 = tdcr & 0xf;
1740	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1741	apic->divide_count = 0x1 << (tmp2 & 0x7);
1742}
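/*
 * Worked example: TDCR = 0xb (the architectural "divide by 1" encoding)
 * gives tmp1 = 0xb, tmp2 = (0x3 | (0x8 >> 1)) + 1 = 8, and a divide_count
 * of 1 << (8 & 7) = 1; TDCR = 0x0 gives tmp2 = 1 and divide_count = 2.
 */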
1743
1744static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1745{
1746	/*
 1747	 * Do not allow the guest to program periodic timers with a small
1748	 * interval, since the hrtimers are not throttled by the host
1749	 * scheduler.
1750	 */
1751	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1752		s64 min_period = min_timer_period_us * 1000LL;
1753
1754		if (apic->lapic_timer.period < min_period) {
1755			pr_info_once(
1756			    "vcpu %i: requested %lld ns "
1757			    "lapic timer period limited to %lld ns\n",
1758			    apic->vcpu->vcpu_id,
1759			    apic->lapic_timer.period, min_period);
1760			apic->lapic_timer.period = min_period;
1761		}
1762	}
1763}
1764
1765static void cancel_hv_timer(struct kvm_lapic *apic);
1766
1767static void cancel_apic_timer(struct kvm_lapic *apic)
1768{
1769	hrtimer_cancel(&apic->lapic_timer.timer);
1770	preempt_disable();
1771	if (apic->lapic_timer.hv_timer_in_use)
1772		cancel_hv_timer(apic);
1773	preempt_enable();
1774	atomic_set(&apic->lapic_timer.pending, 0);
1775}
1776
1777static void apic_update_lvtt(struct kvm_lapic *apic)
1778{
1779	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1780			apic->lapic_timer.timer_mode_mask;
1781
1782	if (apic->lapic_timer.timer_mode != timer_mode) {
1783		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1784				APIC_LVT_TIMER_TSCDEADLINE)) {
1785			cancel_apic_timer(apic);
1786			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1787			apic->lapic_timer.period = 0;
1788			apic->lapic_timer.tscdeadline = 0;
1789		}
1790		apic->lapic_timer.timer_mode = timer_mode;
1791		limit_periodic_timer_frequency(apic);
1792	}
1793}
1794
1795/*
1796 * On APICv, this test will cause a busy wait
 1797 * while a higher-priority task is runnable.
1798 */
1799
1800static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1801{
1802	struct kvm_lapic *apic = vcpu->arch.apic;
1803	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1804
1805	if (kvm_apic_hw_enabled(apic)) {
1806		int vec = reg & APIC_VECTOR_MASK;
1807		void *bitmap = apic->regs + APIC_ISR;
1808
1809		if (apic->apicv_active)
1810			bitmap = apic->regs + APIC_IRR;
1811
1812		if (apic_test_vector(vec, bitmap))
1813			return true;
1814	}
1815	return false;
1816}
1817
1818static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1819{
1820	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1821
1822	/*
1823	 * If the guest TSC is running at a different ratio than the host, then
1824	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
1825	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
 1826	 * always for VMX-enabled hardware.
1827	 */
1828	if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1829		__delay(min(guest_cycles,
1830			nsec_to_cycles(vcpu, timer_advance_ns)));
1831	} else {
1832		u64 delay_ns = guest_cycles * 1000000ULL;
1833		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1834		ndelay(min_t(u32, delay_ns, timer_advance_ns));
1835	}
1836}
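/*
 * Illustration of the scaled path: 2000 guest cycles on a vCPU with
 * virtual_tsc_khz = 2000000 (2 GHz) convert to 2000 * 1000000 / 2000000 =
 * 1000 ns, and the busy wait is then clamped to timer_advance_ns.
 */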
1837
1838static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1839					      s64 advance_expire_delta)
1840{
1841	struct kvm_lapic *apic = vcpu->arch.apic;
1842	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1843	u64 ns;
1844
1845	/* Do not adjust for tiny fluctuations or large random spikes. */
1846	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1847	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1848		return;
1849
1850	/* too early */
1851	if (advance_expire_delta < 0) {
1852		ns = -advance_expire_delta * 1000000ULL;
1853		do_div(ns, vcpu->arch.virtual_tsc_khz);
1854		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1855	} else {
1856	/* too late */
1857		ns = advance_expire_delta * 1000000ULL;
1858		do_div(ns, vcpu->arch.virtual_tsc_khz);
1859		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1860	}
1861
1862	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1863		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1864	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1865}
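/*
 * Example of the adaptive step above: an interrupt arriving 1600 TSC
 * cycles early on a 1.6 GHz guest (virtual_tsc_khz = 1600000) is a
 * 1000 ns error, so timer_advance_ns shrinks by 1000 / 8 = 125 ns;
 * successive expirations converge on the real exit-to-entry latency.
 */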
1866
1867static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1868{
1869	struct kvm_lapic *apic = vcpu->arch.apic;
1870	u64 guest_tsc, tsc_deadline;
1871
1872	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1873	apic->lapic_timer.expired_tscdeadline = 0;
1874	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1875	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1876
1877	adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1878
1879	/*
1880	 * If the timer fired early, reread the TSC to account for the overhead
1881	 * of the above adjustment to avoid waiting longer than is necessary.
1882	 */
1883	if (guest_tsc < tsc_deadline)
1884		guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1885
1886	if (guest_tsc < tsc_deadline)
1887		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1888}
1889
1890void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1891{
1892	if (lapic_in_kernel(vcpu) &&
1893	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1894	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1895	    lapic_timer_int_injected(vcpu))
1896		__kvm_wait_lapic_expire(vcpu);
1897}
1898EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1899
1900static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1901{
1902	struct kvm_timer *ktimer = &apic->lapic_timer;
1903
1904	kvm_apic_local_deliver(apic, APIC_LVTT);
1905	if (apic_lvtt_tscdeadline(apic)) {
1906		ktimer->tscdeadline = 0;
1907	} else if (apic_lvtt_oneshot(apic)) {
1908		ktimer->tscdeadline = 0;
1909		ktimer->target_expiration = 0;
1910	}
1911}
1912
1913static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1914{
1915	struct kvm_vcpu *vcpu = apic->vcpu;
1916	struct kvm_timer *ktimer = &apic->lapic_timer;
1917
1918	if (atomic_read(&apic->lapic_timer.pending))
1919		return;
1920
1921	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1922		ktimer->expired_tscdeadline = ktimer->tscdeadline;
1923
1924	if (!from_timer_fn && apic->apicv_active) {
1925		WARN_ON(kvm_get_running_vcpu() != vcpu);
1926		kvm_apic_inject_pending_timer_irqs(apic);
1927		return;
1928	}
1929
1930	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1931		/*
1932		 * Ensure the guest's timer has truly expired before posting an
1933		 * interrupt.  Open code the relevant checks to avoid querying
1934		 * lapic_timer_int_injected(), which will be false since the
1935		 * interrupt isn't yet injected.  Waiting until after injecting
1936		 * is not an option since that won't help a posted interrupt.
1937		 */
1938		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1939		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
1940			__kvm_wait_lapic_expire(vcpu);
1941		kvm_apic_inject_pending_timer_irqs(apic);
1942		return;
1943	}
1944
1945	atomic_inc(&apic->lapic_timer.pending);
1946	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1947	if (from_timer_fn)
1948		kvm_vcpu_kick(vcpu);
1949}
1950
1951static void start_sw_tscdeadline(struct kvm_lapic *apic)
1952{
1953	struct kvm_timer *ktimer = &apic->lapic_timer;
1954	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1955	u64 ns = 0;
1956	ktime_t expire;
1957	struct kvm_vcpu *vcpu = apic->vcpu;
1958	u32 this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1959	unsigned long flags;
1960	ktime_t now;
1961
1962	if (unlikely(!tscdeadline || !this_tsc_khz))
1963		return;
1964
1965	local_irq_save(flags);
1966
1967	now = ktime_get();
1968	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1969
1970	ns = (tscdeadline - guest_tsc) * 1000000ULL;
1971	do_div(ns, this_tsc_khz);
1972
1973	if (likely(tscdeadline > guest_tsc) &&
1974	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
1975		expire = ktime_add_ns(now, ns);
1976		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1977		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1978	} else
1979		apic_timer_expired(apic, false);
1980
1981	local_irq_restore(flags);
1982}
1983
1984static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1985{
1986	return (u64)tmict * apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1987		(u64)apic->divide_count;
1988}
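/*
 * E.g. with the default APIC bus cycle of 1 ns (apic_bus_cycle_ns,
 * configurable via KVM_CAP_X86_APIC_BUS_CYCLES_NS) and divide_count =
 * 16, TMICT = 1000000 gives 1000000 * 1 * 16 = 16000000 ns, a 16 ms
 * timer period.
 */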
1989
1990static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1991{
1992	ktime_t now, remaining;
1993	u64 ns_remaining_old, ns_remaining_new;
1994
1995	apic->lapic_timer.period =
1996			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1997	limit_periodic_timer_frequency(apic);
1998
1999	now = ktime_get();
2000	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
2001	if (ktime_to_ns(remaining) < 0)
2002		remaining = 0;
2003
2004	ns_remaining_old = ktime_to_ns(remaining);
2005	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
2006	                                   apic->divide_count, old_divisor);
2007
2008	apic->lapic_timer.tscdeadline +=
2009		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
2010		nsec_to_cycles(apic->vcpu, ns_remaining_old);
2011	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
2012}
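/*
 * Example of the rescale above: with 8 ms remaining and the divider
 * changed from 2 (old_divisor) to 4 (divide_count), each remaining
 * APIC timer tick takes twice as long, so ns_remaining_new =
 * 8000000 * 4 / 2 = 16000000 ns and both deadlines move out by 8 ms.
 */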
2013
2014static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
2015{
2016	ktime_t now;
2017	u64 tscl = rdtsc();
2018	s64 deadline;
2019
2020	now = ktime_get();
2021	apic->lapic_timer.period =
2022			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2023
2024	if (!apic->lapic_timer.period) {
2025		apic->lapic_timer.tscdeadline = 0;
2026		return false;
2027	}
2028
2029	limit_periodic_timer_frequency(apic);
2030	deadline = apic->lapic_timer.period;
2031
2032	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2033		if (unlikely(count_reg != APIC_TMICT)) {
2034			deadline = tmict_to_ns(apic,
2035				     kvm_lapic_get_reg(apic, count_reg));
2036			if (unlikely(deadline <= 0)) {
2037				if (apic_lvtt_period(apic))
2038					deadline = apic->lapic_timer.period;
2039				else
2040					deadline = 0;
2041			}
2042			else if (unlikely(deadline > apic->lapic_timer.period)) {
2043				pr_info_ratelimited(
2044				    "vcpu %i: requested lapic timer restore with "
2045				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
2046				    "Using initial count to start timer.\n",
2047				    apic->vcpu->vcpu_id,
2048				    count_reg,
2049				    kvm_lapic_get_reg(apic, count_reg),
2050				    deadline, apic->lapic_timer.period);
2051				kvm_lapic_set_reg(apic, count_reg, 0);
2052				deadline = apic->lapic_timer.period;
2053			}
2054		}
2055	}
2056
2057	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2058		nsec_to_cycles(apic->vcpu, deadline);
2059	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2060
2061	return true;
2062}
2063
2064static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2065{
2066	ktime_t now = ktime_get();
2067	u64 tscl = rdtsc();
2068	ktime_t delta;
2069
2070	/*
2071	 * Synchronize both deadlines to the same time source or
2072	 * differences in the periods (caused by differences in the
2073	 * underlying clocks or numerical approximation errors) will
2074	 * cause the two to drift apart over time as the errors
2075	 * accumulate.
2076	 */
2077	apic->lapic_timer.target_expiration =
2078		ktime_add_ns(apic->lapic_timer.target_expiration,
2079				apic->lapic_timer.period);
2080	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2081	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2082		nsec_to_cycles(apic->vcpu, delta);
2083}
2084
2085static void start_sw_period(struct kvm_lapic *apic)
2086{
2087	if (!apic->lapic_timer.period)
2088		return;
2089
2090	if (ktime_after(ktime_get(),
2091			apic->lapic_timer.target_expiration)) {
2092		apic_timer_expired(apic, false);
2093
2094		if (apic_lvtt_oneshot(apic))
2095			return;
2096
2097		advance_periodic_target_expiration(apic);
2098	}
2099
2100	hrtimer_start(&apic->lapic_timer.timer,
2101		apic->lapic_timer.target_expiration,
2102		HRTIMER_MODE_ABS_HARD);
2103}
2104
2105bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
2106{
2107	if (!lapic_in_kernel(vcpu))
2108		return false;
2109
2110	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2111}
2112
2113static void cancel_hv_timer(struct kvm_lapic *apic)
2114{
2115	WARN_ON(preemptible());
2116	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2117	kvm_x86_call(cancel_hv_timer)(apic->vcpu);
2118	apic->lapic_timer.hv_timer_in_use = false;
2119}
2120
2121static bool start_hv_timer(struct kvm_lapic *apic)
2122{
2123	struct kvm_timer *ktimer = &apic->lapic_timer;
2124	struct kvm_vcpu *vcpu = apic->vcpu;
2125	bool expired;
2126
2127	WARN_ON(preemptible());
2128	if (!kvm_can_use_hv_timer(vcpu))
2129		return false;
2130
2131	if (!ktimer->tscdeadline)
2132		return false;
2133
2134	if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
2135		return false;
2136
2137	ktimer->hv_timer_in_use = true;
2138	hrtimer_cancel(&ktimer->timer);
2139
2140	/*
2141	 * To simplify handling the periodic timer, leave the hv timer running
2142	 * even if the deadline timer has expired, i.e. rely on the resulting
2143	 * VM-Exit to recompute the periodic timer's target expiration.
2144	 */
2145	if (!apic_lvtt_period(apic)) {
2146		/*
2147		 * Cancel the hv timer if the sw timer fired while the hv timer
2148		 * was being programmed, or if the hv timer itself expired.
2149		 */
2150		if (atomic_read(&ktimer->pending)) {
2151			cancel_hv_timer(apic);
2152		} else if (expired) {
2153			apic_timer_expired(apic, false);
2154			cancel_hv_timer(apic);
2155		}
2156	}
2157
2158	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2159
2160	return true;
2161}
2162
2163static void start_sw_timer(struct kvm_lapic *apic)
2164{
2165	struct kvm_timer *ktimer = &apic->lapic_timer;
2166
2167	WARN_ON(preemptible());
2168	if (apic->lapic_timer.hv_timer_in_use)
2169		cancel_hv_timer(apic);
2170	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2171		return;
2172
2173	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2174		start_sw_period(apic);
2175	else if (apic_lvtt_tscdeadline(apic))
2176		start_sw_tscdeadline(apic);
2177	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2178}
2179
2180static void restart_apic_timer(struct kvm_lapic *apic)
2181{
2182	preempt_disable();
2183
2184	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2185		goto out;
2186
2187	if (!start_hv_timer(apic))
2188		start_sw_timer(apic);
2189out:
2190	preempt_enable();
2191}
2192
2193void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
2194{
2195	struct kvm_lapic *apic = vcpu->arch.apic;
2196
2197	preempt_disable();
2198	/* If the preempt notifier has already run, it also called apic_timer_expired */
2199	if (!apic->lapic_timer.hv_timer_in_use)
2200		goto out;
2201	WARN_ON(kvm_vcpu_is_blocking(vcpu));
2202	apic_timer_expired(apic, false);
2203	cancel_hv_timer(apic);
2204
2205	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2206		advance_periodic_target_expiration(apic);
2207		restart_apic_timer(apic);
2208	}
2209out:
2210	preempt_enable();
2211}
2212EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2213
2214void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2215{
2216	restart_apic_timer(vcpu->arch.apic);
2217}
2218
2219void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2220{
2221	struct kvm_lapic *apic = vcpu->arch.apic;
2222
2223	preempt_disable();
2224	/* Possibly the TSC deadline timer is not enabled yet */
2225	if (apic->lapic_timer.hv_timer_in_use)
2226		start_sw_timer(apic);
2227	preempt_enable();
2228}
2229
2230void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2231{
2232	struct kvm_lapic *apic = vcpu->arch.apic;
2233
2234	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2235	restart_apic_timer(apic);
2236}
2237
2238static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2239{
2240	atomic_set(&apic->lapic_timer.pending, 0);
2241
2242	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2243	    && !set_target_expiration(apic, count_reg))
2244		return;
2245
2246	restart_apic_timer(apic);
2247}
2248
2249static void start_apic_timer(struct kvm_lapic *apic)
2250{
2251	__start_apic_timer(apic, APIC_TMICT);
2252}
2253
2254static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2255{
2256	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2257
2258	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2259		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2260		if (lvt0_in_nmi_mode) {
2261			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2262		} else
2263			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2264	}
2265}
2266
2267static int get_lvt_index(u32 reg)
2268{
2269	if (reg == APIC_LVTCMCI)
2270		return LVT_CMCI;
2271	if (reg < APIC_LVTT || reg > APIC_LVTERR)
2272		return -1;
2273	return array_index_nospec(
2274			(reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2275}
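/*
 * The LVT registers occupy consecutive 16-byte slots from APIC_LVTT
 * (0x320) to APIC_LVTERR (0x370), so e.g. APIC_LVT1 (0x360) yields
 * index (0x360 - 0x320) >> 4 = 4; APIC_LVTCMCI (0x2f0) falls outside
 * that range and is special-cased above.
 */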
2276
2277static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2278{
2279	int ret = 0;
2280
2281	trace_kvm_apic_write(reg, val);
2282
2283	switch (reg) {
2284	case APIC_ID:		/* Local APIC ID */
2285		if (!apic_x2apic_mode(apic)) {
2286			kvm_apic_set_xapic_id(apic, val >> 24);
2287		} else {
2288			ret = 1;
2289		}
2290		break;
2291
2292	case APIC_TASKPRI:
2293		report_tpr_access(apic, true);
2294		apic_set_tpr(apic, val & 0xff);
2295		break;
2296
2297	case APIC_EOI:
2298		apic_set_eoi(apic);
2299		break;
2300
2301	case APIC_LDR:
2302		if (!apic_x2apic_mode(apic))
2303			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2304		else
2305			ret = 1;
2306		break;
2307
2308	case APIC_DFR:
2309		if (!apic_x2apic_mode(apic))
2310			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2311		else
2312			ret = 1;
2313		break;
2314
2315	case APIC_SPIV: {
2316		u32 mask = 0x3ff;
2317		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2318			mask |= APIC_SPIV_DIRECTED_EOI;
2319		apic_set_spiv(apic, val & mask);
2320		if (!(val & APIC_SPIV_APIC_ENABLED)) {
2321			int i;
2322
2323			for (i = 0; i < apic->nr_lvt_entries; i++) {
2324				kvm_lapic_set_reg(apic, APIC_LVTx(i),
2325					kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2326			}
2327			apic_update_lvtt(apic);
2328			atomic_set(&apic->lapic_timer.pending, 0);
2329
2330		}
2331		break;
2332	}
2333	case APIC_ICR:
2334		WARN_ON_ONCE(apic_x2apic_mode(apic));
2335
2336		/* No delay here, so we always clear the pending bit */
2337		val &= ~APIC_ICR_BUSY;
2338		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2339		kvm_lapic_set_reg(apic, APIC_ICR, val);
2340		break;
2341	case APIC_ICR2:
2342		if (apic_x2apic_mode(apic))
2343			ret = 1;
2344		else
2345			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2346		break;
2347
2348	case APIC_LVT0:
2349		apic_manage_nmi_watchdog(apic, val);
2350		fallthrough;
2351	case APIC_LVTTHMR:
2352	case APIC_LVTPC:
2353	case APIC_LVT1:
2354	case APIC_LVTERR:
2355	case APIC_LVTCMCI: {
2356		u32 index = get_lvt_index(reg);
2357		if (!kvm_lapic_lvt_supported(apic, index)) {
2358			ret = 1;
2359			break;
2360		}
2361		if (!kvm_apic_sw_enabled(apic))
2362			val |= APIC_LVT_MASKED;
2363		val &= apic_lvt_mask[index];
2364		kvm_lapic_set_reg(apic, reg, val);
2365		break;
2366	}
2367
2368	case APIC_LVTT:
2369		if (!kvm_apic_sw_enabled(apic))
2370			val |= APIC_LVT_MASKED;
2371		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2372		kvm_lapic_set_reg(apic, APIC_LVTT, val);
2373		apic_update_lvtt(apic);
2374		break;
2375
2376	case APIC_TMICT:
2377		if (apic_lvtt_tscdeadline(apic))
2378			break;
2379
2380		cancel_apic_timer(apic);
2381		kvm_lapic_set_reg(apic, APIC_TMICT, val);
2382		start_apic_timer(apic);
2383		break;
2384
2385	case APIC_TDCR: {
2386		uint32_t old_divisor = apic->divide_count;
2387
2388		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2389		update_divide_count(apic);
2390		if (apic->divide_count != old_divisor &&
2391				apic->lapic_timer.period) {
2392			hrtimer_cancel(&apic->lapic_timer.timer);
2393			update_target_expiration(apic, old_divisor);
2394			restart_apic_timer(apic);
2395		}
2396		break;
2397	}
2398	case APIC_ESR:
2399		if (apic_x2apic_mode(apic) && val != 0)
2400			ret = 1;
2401		break;
2402
2403	case APIC_SELF_IPI:
2404		/*
2405		 * Self-IPI exists only when x2APIC is enabled.  Bits 7:0 hold
2406		 * the vector, everything else is reserved.
2407		 */
2408		if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2409			ret = 1;
2410		else
2411			kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2412		break;
2413	default:
2414		ret = 1;
2415		break;
2416	}
2417
2418	/*
2419	 * Recalculate APIC maps if necessary, e.g. if the software enable bit
2420	 * was toggled, the APIC ID changed, etc...   The maps are marked dirty
2421	 * on relevant changes, i.e. this is a nop for most writes.
2422	 */
2423	kvm_recalculate_apic_map(apic->vcpu->kvm);
2424
2425	return ret;
2426}
2427
2428static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2429			    gpa_t address, int len, const void *data)
2430{
2431	struct kvm_lapic *apic = to_lapic(this);
2432	unsigned int offset = address - apic->base_address;
2433	u32 val;
2434
2435	if (!apic_mmio_in_range(apic, address))
2436		return -EOPNOTSUPP;
2437
2438	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2439		if (!kvm_check_has_quirk(vcpu->kvm,
2440					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2441			return -EOPNOTSUPP;
2442
2443		return 0;
2444	}
2445
2446	/*
2447	 * APIC registers must be aligned on a 128-bit boundary.
2448	 * 32/64/128-bit registers must be accessed through 32-bit accesses.
2449	 * See SDM section 8.4.1.
2450	 */
2451	if (len != 4 || (offset & 0xf))
2452		return 0;
2453
2454	val = *(u32*)data;
2455
2456	kvm_lapic_reg_write(apic, offset & 0xff0, val);
2457
2458	return 0;
2459}
2460
2461void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2462{
2463	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2464}
2465EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2466
2467#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
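/*
 * I.e. only the vector (7:0), delivery mode (10:8), destination mode
 * (11), level (14), trigger mode (15), shorthand (19:18) and the
 * destination field (63:32) may be set by the guest; bit 12 (BUSY) is
 * dealt with separately below.
 */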
2468
2469int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
2470{
2471	if (data & X2APIC_ICR_RESERVED_BITS)
2472		return 1;
2473
2474	/*
2475	 * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
2476	 * only AMD requires it to be zero, Intel essentially just ignores the
2477	 * bit.  And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
2478	 * the CPU performs the reserved bits checks, i.e. the underlying CPU
2479	 * behavior will "win".  Arbitrarily clear the BUSY bit, as there is no
2480	 * sane way to provide consistent behavior with respect to hardware.
2481	 */
2482	data &= ~APIC_ICR_BUSY;
2483
2484	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
2485	if (kvm_x86_ops.x2apic_icr_is_split) {
2486		kvm_lapic_set_reg(apic, APIC_ICR, data);
2487		kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
2488	} else {
2489		kvm_lapic_set_reg64(apic, APIC_ICR, data);
2490	}
2491	trace_kvm_apic_write(APIC_ICR, data);
2492	return 0;
2493}
2494
2495static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
2496{
2497	if (kvm_x86_ops.x2apic_icr_is_split)
2498		return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
2499		       (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
2500
2501	return kvm_lapic_get_reg64(apic, APIC_ICR);
2502}
2503
2504/* emulate APIC access in a trap manner */
2505void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2506{
2507	struct kvm_lapic *apic = vcpu->arch.apic;
2508
2509	/*
2510	 * ICR is a single 64-bit register when x2APIC is enabled, all others
2511	 * registers hold 32-bit values.  For legacy xAPIC, ICR writes need to
2512	 * go down the common path to get the upper half from ICR2.
2513	 *
2514	 * Note, using the write helpers may incur an unnecessary write to the
2515	 * virtual APIC state, but KVM needs to conditionally modify the value
2516	 * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
2517	 * conditional branches is likely a wash relative to the cost of the
2518	 * maybe-unnecessary write, and both are in the noise anyways.
2519	 */
2520	if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2521		WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
2522	else
2523		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2524}
2525EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2526
2527void kvm_free_lapic(struct kvm_vcpu *vcpu)
2528{
2529	struct kvm_lapic *apic = vcpu->arch.apic;
2530
2531	if (!vcpu->arch.apic) {
2532		static_branch_dec(&kvm_has_noapic_vcpu);
2533		return;
2534	}
2535
2536	hrtimer_cancel(&apic->lapic_timer.timer);
2537
2538	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2539		static_branch_slow_dec_deferred(&apic_hw_disabled);
2540
2541	if (!apic->sw_enabled)
2542		static_branch_slow_dec_deferred(&apic_sw_disabled);
2543
2544	if (apic->regs)
2545		free_page((unsigned long)apic->regs);
2546
2547	kfree(apic);
2548}
2549
2550/*
2551 *----------------------------------------------------------------------
2552 * LAPIC interface
2553 *----------------------------------------------------------------------
2554 */
2555u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2556{
2557	struct kvm_lapic *apic = vcpu->arch.apic;
2558
2559	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2560		return 0;
2561
2562	return apic->lapic_timer.tscdeadline;
2563}
2564
2565void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2566{
2567	struct kvm_lapic *apic = vcpu->arch.apic;
2568
2569	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2570		return;
2571
2572	hrtimer_cancel(&apic->lapic_timer.timer);
2573	apic->lapic_timer.tscdeadline = data;
2574	start_apic_timer(apic);
2575}
2576
2577void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2578{
2579	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2580}
2581
2582u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2583{
2584	u64 tpr;
2585
2586	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2587
2588	return (tpr & 0xf0) >> 4;
2589}
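/*
 * CR8 and the TPR mirror each other through bits 7:4 of APIC_TASKPRI:
 * e.g. a guest write of 0xb to CR8 stores 0xb0 in the TPR above, and
 * a TPR of 0xb0 reads back as CR8 = 0xb.
 */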
2590
2591static void __kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value)
2592{
2593	u64 old_value = vcpu->arch.apic_base;
2594	struct kvm_lapic *apic = vcpu->arch.apic;
2595
2596	vcpu->arch.apic_base = value;
2597
2598	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2599		kvm_update_cpuid_runtime(vcpu);
2600
2601	if (!apic)
2602		return;
2603
2604	/* update jump label if enable bit changes */
2605	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2606		if (value & MSR_IA32_APICBASE_ENABLE) {
2607			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2608			static_branch_slow_dec_deferred(&apic_hw_disabled);
2609			/* Check if there are APF page ready requests pending */
2610			kvm_make_request(KVM_REQ_APF_READY, vcpu);
2611		} else {
2612			static_branch_inc(&apic_hw_disabled.key);
2613			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2614		}
2615	}
2616
2617	if ((old_value ^ value) & X2APIC_ENABLE) {
2618		if (value & X2APIC_ENABLE)
2619			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2620		else if (value & MSR_IA32_APICBASE_ENABLE)
2621			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2622	}
2623
2624	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
2625		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2626		kvm_x86_call(set_virtual_apic_mode)(vcpu);
2627	}
2628
2629	apic->base_address = apic->vcpu->arch.apic_base &
2630			     MSR_IA32_APICBASE_BASE;
2631
2632	if ((value & MSR_IA32_APICBASE_ENABLE) &&
2633	     apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2634		kvm_set_apicv_inhibit(apic->vcpu->kvm,
2635				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2636	}
2637}
2638
2639int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated)
2640{
2641	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
2642	enum lapic_mode new_mode = kvm_apic_mode(value);
2643
2644	if (vcpu->arch.apic_base == value)
2645		return 0;
2646
2647	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
2648		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
2649
2650	if ((value & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
2651		return 1;
2652	if (!host_initiated) {
2653		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
2654			return 1;
2655		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
2656			return 1;
2657	}
2658
2659	__kvm_apic_set_base(vcpu, value);
2660	kvm_recalculate_apic_map(vcpu->kvm);
2661	return 0;
2662}
2663
2664void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2665{
2666	struct kvm_lapic *apic = vcpu->arch.apic;
2667
2668	/*
2669	 * When APICv is enabled, KVM must always search the IRR for a pending
2670	 * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
2671	 * isn't running.  If APICv is disabled, KVM _should_ search the IRR
2672	 * for a pending IRQ.  But KVM currently doesn't ensure *all* hardware,
2673	 * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
2674	 * the IRR at this time could race with IRQ delivery from hardware that
2675	 * still sees APICv as being enabled.
2676	 *
2677	 * FIXME: Ensure other vCPUs and devices observe the change in APICv
2678	 *        state prior to updating KVM's metadata caches, so that KVM
2679	 *        can safely search the IRR and set irr_pending accordingly.
2680	 */
2681	apic->irr_pending = true;
2682
2683	if (apic->apicv_active)
2684		apic->isr_count = 1;
2685	else
2686		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2687
2688	apic->highest_isr_cache = -1;
2689}
2690
2691int kvm_alloc_apic_access_page(struct kvm *kvm)
2692{
2693	void __user *hva;
2694	int ret = 0;
2695
2696	mutex_lock(&kvm->slots_lock);
2697	if (kvm->arch.apic_access_memslot_enabled ||
2698	    kvm->arch.apic_access_memslot_inhibited)
2699		goto out;
2700
2701	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2702				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
2703	if (IS_ERR(hva)) {
2704		ret = PTR_ERR(hva);
2705		goto out;
2706	}
2707
2708	kvm->arch.apic_access_memslot_enabled = true;
2709out:
2710	mutex_unlock(&kvm->slots_lock);
2711	return ret;
2712}
2713EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
2714
2715void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
2716{
2717	struct kvm *kvm = vcpu->kvm;
2718
2719	if (!kvm->arch.apic_access_memslot_enabled)
2720		return;
2721
2722	kvm_vcpu_srcu_read_unlock(vcpu);
2723
2724	mutex_lock(&kvm->slots_lock);
2725
2726	if (kvm->arch.apic_access_memslot_enabled) {
2727		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2728		/*
2729		 * Clear "enabled" after the memslot is deleted so that a
2730		 * different vCPU doesn't get a false negative when checking
2731		 * the flag out of slots_lock.  No additional memory barrier is
2732		 * needed as modifying memslots requires waiting for other vCPUs to
2733		 * drop SRCU (see above), and false positives are ok as the
2734		 * flag is rechecked after acquiring slots_lock.
2735		 */
2736		kvm->arch.apic_access_memslot_enabled = false;
2737
2738		/*
2739		 * Mark the memslot as inhibited to prevent reallocating the
2740		 * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
2741		 */
2742		kvm->arch.apic_access_memslot_inhibited = true;
2743	}
2744
2745	mutex_unlock(&kvm->slots_lock);
2746
2747	kvm_vcpu_srcu_read_lock(vcpu);
2748}
2749
2750void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2751{
2752	struct kvm_lapic *apic = vcpu->arch.apic;
2753	u64 msr_val;
2754	int i;
2755
2756	kvm_x86_call(apicv_pre_state_restore)(vcpu);
2757
2758	if (!init_event) {
2759		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2760		if (kvm_vcpu_is_reset_bsp(vcpu))
2761			msr_val |= MSR_IA32_APICBASE_BSP;
2762
2763		/*
2764		 * Use the inner helper to avoid an extra recalculation of the
2765		 * optimized APIC map if some other task has dirtied the map.
2766		 * The recalculation needed for this vCPU will be done after
2767		 * all APIC state has been initialized (see below).
2768		 */
2769		__kvm_apic_set_base(vcpu, msr_val);
2770	}
2771
2772	if (!apic)
2773		return;
2774
2775	/* Stop the timer in case it's a reset to an active apic */
2776	hrtimer_cancel(&apic->lapic_timer.timer);
2777
2778	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2779	if (!init_event)
2780		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2781	kvm_apic_set_version(apic->vcpu);
2782
2783	for (i = 0; i < apic->nr_lvt_entries; i++)
2784		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2785	apic_update_lvtt(apic);
2786	if (kvm_vcpu_is_reset_bsp(vcpu) &&
2787	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2788		kvm_lapic_set_reg(apic, APIC_LVT0,
2789			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2790	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2791
2792	kvm_apic_set_dfr(apic, 0xffffffffU);
2793	apic_set_spiv(apic, 0xff);
2794	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2795	if (!apic_x2apic_mode(apic))
2796		kvm_apic_set_ldr(apic, 0);
2797	kvm_lapic_set_reg(apic, APIC_ESR, 0);
2798	if (!apic_x2apic_mode(apic)) {
2799		kvm_lapic_set_reg(apic, APIC_ICR, 0);
2800		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2801	} else {
2802		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2803	}
2804	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2805	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2806	for (i = 0; i < 8; i++) {
2807		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2808		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2809		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2810	}
2811	kvm_apic_update_apicv(vcpu);
2812	update_divide_count(apic);
2813	atomic_set(&apic->lapic_timer.pending, 0);
2814
2815	vcpu->arch.pv_eoi.msr_val = 0;
2816	apic_update_ppr(apic);
2817	if (apic->apicv_active) {
2818		kvm_x86_call(apicv_post_state_restore)(vcpu);
2819		kvm_x86_call(hwapic_irr_update)(vcpu, -1);
2820		kvm_x86_call(hwapic_isr_update)(vcpu, -1);
2821	}
2822
2823	vcpu->arch.apic_arb_prio = 0;
2824	vcpu->arch.apic_attention = 0;
2825
2826	kvm_recalculate_apic_map(vcpu->kvm);
2827}
2828
2829/*
2830 *----------------------------------------------------------------------
2831 * timer interface
2832 *----------------------------------------------------------------------
2833 */
2834
2835static bool lapic_is_periodic(struct kvm_lapic *apic)
2836{
2837	return apic_lvtt_period(apic);
2838}
2839
2840int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2841{
2842	struct kvm_lapic *apic = vcpu->arch.apic;
2843
2844	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2845		return atomic_read(&apic->lapic_timer.pending);
2846
2847	return 0;
2848}
2849
2850int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2851{
2852	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2853	int vector, mode, trig_mode;
2854	int r;
2855
2856	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2857		vector = reg & APIC_VECTOR_MASK;
2858		mode = reg & APIC_MODE_MASK;
2859		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2860
2861		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2862		if (r && lvt_type == APIC_LVTPC &&
2863		    guest_cpuid_is_intel_compatible(apic->vcpu))
2864			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2865		return r;
2866	}
2867	return 0;
2868}
2869
2870void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2871{
2872	struct kvm_lapic *apic = vcpu->arch.apic;
2873
2874	if (apic)
2875		kvm_apic_local_deliver(apic, APIC_LVT0);
2876}
2877
2878static const struct kvm_io_device_ops apic_mmio_ops = {
2879	.read     = apic_mmio_read,
2880	.write    = apic_mmio_write,
2881};
2882
2883static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2884{
2885	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2886	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2887
2888	apic_timer_expired(apic, true);
2889
2890	if (lapic_is_periodic(apic)) {
2891		advance_periodic_target_expiration(apic);
2892		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2893		return HRTIMER_RESTART;
2894	} else
2895		return HRTIMER_NORESTART;
2896}
2897
2898int kvm_create_lapic(struct kvm_vcpu *vcpu)
2899{
2900	struct kvm_lapic *apic;
2901
2902	ASSERT(vcpu != NULL);
2903
2904	if (!irqchip_in_kernel(vcpu->kvm)) {
2905		static_branch_inc(&kvm_has_noapic_vcpu);
2906		return 0;
2907	}
2908
2909	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2910	if (!apic)
2911		goto nomem;
2912
2913	vcpu->arch.apic = apic;
2914
2915	if (kvm_x86_ops.alloc_apic_backing_page)
2916		apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
2917	else
2918		apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2919	if (!apic->regs) {
2920		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2921		       vcpu->vcpu_id);
2922		goto nomem_free_apic;
2923	}
2924	apic->vcpu = vcpu;
2925
2926	apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2927
2928	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2929		     HRTIMER_MODE_ABS_HARD);
2930	apic->lapic_timer.timer.function = apic_timer_fn;
2931	if (lapic_timer_advance)
2932		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2933
2934	/*
2935	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2936	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2937	 */
2938	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2939	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2940	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2941
2942	/*
2943	 * Defer evaluating inhibits until the vCPU is first run, as this vCPU
2944	 * will not get notified of any changes until this vCPU is visible to
2945	 * other vCPUs (marked online and added to the set of vCPUs).
2946	 *
2947	 * Opportunistically mark APICv active as VMX in particular is highly
2948	 * unlikely to have inhibits.  Ignore the current per-VM APICv state so
2949	 * that vCPU creation is guaranteed to run with a deterministic value,
2950	 * the request will ensure the vCPU gets the correct state before VM-Entry.
2951	 */
2952	if (enable_apicv) {
2953		apic->apicv_active = true;
2954		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2955	}
2956
2957	return 0;
2958nomem_free_apic:
2959	kfree(apic);
2960	vcpu->arch.apic = NULL;
2961nomem:
2962	return -ENOMEM;
2963}
2964
2965int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2966{
2967	struct kvm_lapic *apic = vcpu->arch.apic;
2968	u32 ppr;
2969
2970	if (!kvm_apic_present(vcpu))
2971		return -1;
2972
2973	__apic_update_ppr(apic, &ppr);
2974	return apic_has_interrupt_for_ppr(apic, ppr);
2975}
2976EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2977
2978int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2979{
2980	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2981
2982	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2983		return 1;
2984	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2985	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2986		return 1;
2987	return 0;
2988}
2989
2990void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2991{
2992	struct kvm_lapic *apic = vcpu->arch.apic;
2993
2994	if (atomic_read(&apic->lapic_timer.pending) > 0) {
2995		kvm_apic_inject_pending_timer_irqs(apic);
2996		atomic_set(&apic->lapic_timer.pending, 0);
2997	}
2998}
2999
3000void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
3001{
3002	struct kvm_lapic *apic = vcpu->arch.apic;
3003	u32 ppr;
3004
3005	if (WARN_ON_ONCE(vector < 0 || !apic))
3006		return;
3007
3008	/*
3009	 * We get here even with APIC virtualization enabled, if doing
3010	 * nested virtualization and L1 runs with the "acknowledge interrupt
3011	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
3012	 * because the process would deliver it through the IDT.
3013	 */
3014
3015	apic_clear_irr(vector, apic);
3016	if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
3017		/*
3018		 * For auto-EOI interrupts, there might be another pending
3019		 * interrupt above PPR, so check whether to raise another
3020		 * KVM_REQ_EVENT.
3021		 */
3022		apic_update_ppr(apic);
3023	} else {
3024		/*
3025		 * For normal interrupts, PPR has been raised and there cannot
3026		 * be a higher-priority pending interrupt---except if there was
3027		 * a concurrent interrupt injection, but that would have
3028		 * triggered KVM_REQ_EVENT already.
3029		 */
3030		apic_set_isr(vector, apic);
3031		__apic_update_ppr(apic, &ppr);
3032	}
3033
3034}
3035EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
3036
3037static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
3038		struct kvm_lapic_state *s, bool set)
3039{
3040	if (apic_x2apic_mode(vcpu->arch.apic)) {
3041		u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic);
3042		u32 *id = (u32 *)(s->regs + APIC_ID);
3043		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
3044		u64 icr;
3045
3046		if (vcpu->kvm->arch.x2apic_format) {
3047			if (*id != x2apic_id)
3048				return -EINVAL;
3049		} else {
3050			/*
3051			 * Ignore the userspace value when setting APIC state.
3052			 * KVM's model is that the x2APIC ID is readonly, e.g.
3053			 * KVM only supports delivering interrupts to KVM's
3054			 * version of the x2APIC ID.  However, for backwards
3055			 * compatibility, don't reject attempts to set a
3056			 * mismatched ID for userspace that hasn't opted into
3057			 * x2apic_format.
3058			 */
3059			if (set)
3060				*id = x2apic_id;
3061			else
3062				*id = x2apic_id << 24;
3063		}
3064
3065		/*
3066		 * In x2APIC mode, the LDR is fixed and based on the id.  And
3067		 * if the ICR is _not_ split, ICR is internally a single 64-bit
3068		 * register, but needs to be split to ICR+ICR2 in userspace for
3069		 * backwards compatibility.
3070		 */
3071		if (set)
3072			*ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
3073
3074		if (!kvm_x86_ops.x2apic_icr_is_split) {
3075			if (set) {
3076				icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
3077				      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
3078				__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
3079			} else {
3080				icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
3081				__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
3082			}
3083		}
3084	}
3085
3086	return 0;
3087}
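/*
 * E.g. for vCPU 5 in x2APIC mode without x2apic_format, KVM_GET_LAPIC
 * reports the ID register as 5 << 24 = 0x05000000 to mimic the legacy
 * xAPIC layout, while KVM_SET_LAPIC quietly forces the read-only
 * x2APIC ID back to 5.
 */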
3088
3089int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3090{
3091	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
3092
3093	/*
3094	 * Get calculated timer current count for remaining timer period (if
3095	 * any) and store it in the returned register set.
3096	 */
3097	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
3098			    __apic_read(vcpu->arch.apic, APIC_TMCCT));
3099
3100	return kvm_apic_state_fixup(vcpu, s, false);
3101}
3102
3103int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3104{
3105	struct kvm_lapic *apic = vcpu->arch.apic;
3106	int r;
3107
3108	kvm_x86_call(apicv_pre_state_restore)(vcpu);
3109
3110	/* set SPIV separately to get count of SW disabled APICs right */
3111	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
3112
3113	r = kvm_apic_state_fixup(vcpu, s, true);
3114	if (r) {
3115		kvm_recalculate_apic_map(vcpu->kvm);
3116		return r;
3117	}
3118	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3119
3120	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3121	kvm_recalculate_apic_map(vcpu->kvm);
3122	kvm_apic_set_version(vcpu);
3123
3124	apic_update_ppr(apic);
3125	cancel_apic_timer(apic);
3126	apic->lapic_timer.expired_tscdeadline = 0;
3127	apic_update_lvtt(apic);
3128	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3129	update_divide_count(apic);
3130	__start_apic_timer(apic, APIC_TMCCT);
3131	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3132	kvm_apic_update_apicv(vcpu);
3133	if (apic->apicv_active) {
3134		kvm_x86_call(apicv_post_state_restore)(vcpu);
3135		kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
3136		kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
3137	}
3138	kvm_make_request(KVM_REQ_EVENT, vcpu);
3139	if (ioapic_in_kernel(vcpu->kvm))
3140		kvm_rtc_eoi_tracking_restore_one(vcpu);
3141
3142	vcpu->arch.apic_arb_prio = 0;
3143
3144	return 0;
3145}
3146
3147void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
3148{
3149	struct hrtimer *timer;
3150
3151	if (!lapic_in_kernel(vcpu) ||
3152		kvm_can_post_timer_interrupt(vcpu))
3153		return;
3154
3155	timer = &vcpu->arch.apic->lapic_timer.timer;
3156	if (hrtimer_cancel(timer))
3157		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
3158}
3159
3160/*
3161 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
3162 *
3163 * Detect whether guest triggered PV EOI since the
3164 * last entry. If yes, set EOI on guests's behalf.
3165 * Clear PV EOI in guest memory in any case.
3166 */
3167static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
3168					struct kvm_lapic *apic)
3169{
3170	int vector;
3171	/*
3172	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
3173	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
3174	 *
3175	 * KVM_APIC_PV_EOI_PENDING is unset:
3176	 * 	-> host disabled PV EOI.
3177	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
3178	 * 	-> host enabled PV EOI, guest did not execute EOI yet.
3179	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
3180	 * 	-> host enabled PV EOI, guest executed EOI.
3181	 */
3182	BUG_ON(!pv_eoi_enabled(vcpu));
3183
3184	if (pv_eoi_test_and_clr_pending(vcpu))
3185		return;
3186	vector = apic_set_eoi(apic);
3187	trace_kvm_pv_eoi(apic, vector);
3188}
3189
3190void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
3191{
3192	u32 data;
3193
3194	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
3195		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3196
3197	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3198		return;
3199
3200	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3201				  sizeof(u32)))
3202		return;
3203
3204	apic_set_tpr(vcpu->arch.apic, data & 0xff);
3205}
3206
3207/*
3208 * apic_sync_pv_eoi_to_guest - called before vmentry
3209 *
3210 * Detect whether it's safe to enable PV EOI and
3211 * if yes do so.
3212 */
3213static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
3214					struct kvm_lapic *apic)
3215{
3216	if (!pv_eoi_enabled(vcpu) ||
3217	    /* IRR set or many bits in ISR: could be nested. */
3218	    apic->irr_pending ||
3219	    /* Cache not set: could be safe but we don't bother. */
3220	    apic->highest_isr_cache == -1 ||
3221	    /* Need EOI to update ioapic. */
3222	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3223		/*
3224		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
3225		 * so we need not do anything here.
3226		 */
3227		return;
3228	}
3229
3230	pv_eoi_set_pending(apic->vcpu);
3231}
3232
3233void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
3234{
3235	u32 data, tpr;
3236	int max_irr, max_isr;
3237	struct kvm_lapic *apic = vcpu->arch.apic;
3238
3239	apic_sync_pv_eoi_to_guest(vcpu, apic);
3240
3241	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3242		return;
3243
3244	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3245	max_irr = apic_find_highest_irr(apic);
3246	if (max_irr < 0)
3247		max_irr = 0;
3248	max_isr = apic_find_highest_isr(apic);
3249	if (max_isr < 0)
3250		max_isr = 0;
3251	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
3252
3253	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3254				sizeof(u32));
3255}
3256
3257int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
3258{
3259	if (vapic_addr) {
3260		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
3261					&vcpu->arch.apic->vapic_cache,
3262					vapic_addr, sizeof(u32)))
3263			return -EINVAL;
3264		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3265	} else {
3266		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3267	}
3268
3269	vcpu->arch.apic->vapic_addr = vapic_addr;
3270	return 0;
3271}
3272
3273static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
3274{
3275	u32 low;
3276
3277	if (reg == APIC_ICR) {
3278		*data = kvm_x2apic_icr_read(apic);
3279		return 0;
3280	}
3281
3282	if (kvm_lapic_reg_read(apic, reg, 4, &low))
3283		return 1;
3284
3285	*data = low;
3286
3287	return 0;
3288}
3289
3290static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
3291{
3292	/*
3293	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
3294	 * can be written as such, all other registers remain accessible only
3295	 * through 32-bit reads/writes.
3296	 */
3297	if (reg == APIC_ICR)
3298		return kvm_x2apic_icr_write(apic, data);
3299
3300	/* Bits 63:32 are reserved in all other registers. */
3301	if (data >> 32)
3302		return 1;
3303
3304	return kvm_lapic_reg_write(apic, reg, (u32)data);
3305}
3306
3307int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
3308{
3309	struct kvm_lapic *apic = vcpu->arch.apic;
3310	u32 reg = (msr - APIC_BASE_MSR) << 4;
3311
3312	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3313		return 1;
3314
3315	return kvm_lapic_msr_write(apic, reg, data);
3316}
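/*
 * The x2APIC MSR range maps linearly onto the xAPIC MMIO layout via
 * reg = (msr - APIC_BASE_MSR) << 4: e.g. MSR 0x808 (TPR) maps to
 * APIC_TASKPRI (0x80) and MSR 0x830 (ICR) to APIC_ICR (0x300).
 */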
3317
3318int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
3319{
3320	struct kvm_lapic *apic = vcpu->arch.apic;
3321	u32 reg = (msr - APIC_BASE_MSR) << 4;
3322
3323	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3324		return 1;
3325
3326	return kvm_lapic_msr_read(apic, reg, data);
3327}
3328
3329int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
3330{
3331	if (!lapic_in_kernel(vcpu))
3332		return 1;
3333
3334	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3335}
3336
3337int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
3338{
3339	if (!lapic_in_kernel(vcpu))
3340		return 1;
3341
3342	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3343}
3344
3345int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
3346{
3347	u64 addr = data & ~KVM_MSR_ENABLED;
3348	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
3349	unsigned long new_len;
3350	int ret;
3351
3352	if (!IS_ALIGNED(addr, 4))
3353		return 1;
3354
3355	if (data & KVM_MSR_ENABLED) {
3356		if (addr == ghc->gpa && len <= ghc->len)
3357			new_len = ghc->len;
3358		else
3359			new_len = len;
3360
3361		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
3362		if (ret)
3363			return ret;
3364	}
3365
3366	vcpu->arch.pv_eoi.msr_val = data;
3367
3368	return 0;
3369}
3370
3371int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
3372{
3373	struct kvm_lapic *apic = vcpu->arch.apic;
3374	u8 sipi_vector;
3375	int r;
3376
3377	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
3378		return 0;
3379
3380	if (is_guest_mode(vcpu)) {
3381		r = kvm_check_nested_events(vcpu);
3382		if (r < 0)
3383			return r == -EBUSY ? 0 : r;
3384		/*
3385		 * Continue processing INIT/SIPI even if a nested VM-Exit
3386		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
3387		 * are blocked as a result of transitioning to VMX root mode.
3388		 */
3389	}
3390
3391	/*
3392	 * INITs are blocked while CPU is in specific states (SMM, VMX root
3393	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
3394	 * wait-for-SIPI (WFS).
3395	 */
3396	if (!kvm_apic_init_sipi_allowed(vcpu)) {
3397		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
3398		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3399		return 0;
3400	}
3401
3402	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3403		kvm_vcpu_reset(vcpu, true);
3404		if (kvm_vcpu_is_bsp(apic->vcpu))
3405			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3406		else
3407			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3408	}
3409	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3410		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
3411			/* evaluate pending_events before reading the vector */
3412			smp_rmb();
3413			sipi_vector = apic->sipi_vector;
3414			kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
3415							       sipi_vector);
3416			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3417		}
3418	}
3419	return 0;
3420}
3421
3422void kvm_lapic_exit(void)
3423{
3424	static_key_deferred_flush(&apic_hw_disabled);
3425	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
3426	static_key_deferred_flush(&apic_sw_disabled);
3427	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
3428}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2
   3/*
   4 * Local APIC virtualization
   5 *
   6 * Copyright (C) 2006 Qumranet, Inc.
   7 * Copyright (C) 2007 Novell
   8 * Copyright (C) 2007 Intel
   9 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
  10 *
  11 * Authors:
  12 *   Dor Laor <dor.laor@qumranet.com>
  13 *   Gregory Haskins <ghaskins@novell.com>
  14 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
  15 *
  16 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
  17 */
 
  18
  19#include <linux/kvm_host.h>
  20#include <linux/kvm.h>
  21#include <linux/mm.h>
  22#include <linux/highmem.h>
  23#include <linux/smp.h>
  24#include <linux/hrtimer.h>
  25#include <linux/io.h>
  26#include <linux/export.h>
  27#include <linux/math64.h>
  28#include <linux/slab.h>
  29#include <asm/processor.h>
 
  30#include <asm/msr.h>
  31#include <asm/page.h>
  32#include <asm/current.h>
  33#include <asm/apicdef.h>
  34#include <asm/delay.h>
  35#include <linux/atomic.h>
  36#include <linux/jump_label.h>
  37#include "kvm_cache_regs.h"
  38#include "irq.h"
  39#include "ioapic.h"
  40#include "trace.h"
  41#include "x86.h"
 
  42#include "cpuid.h"
  43#include "hyperv.h"
 
  44
  45#ifndef CONFIG_X86_64
  46#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
  47#else
  48#define mod_64(x, y) ((x) % (y))
  49#endif
  50
  51#define PRId64 "d"
  52#define PRIx64 "llx"
  53#define PRIu64 "u"
  54#define PRIo64 "o"
  55
  56/* 14 is the version for Xeon and Pentium 8.4.8*/
  57#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
  58#define LAPIC_MMIO_LENGTH		(1 << 12)
  59/* followed define is not in apicdef.h */
  60#define MAX_APIC_VECTOR			256
  61#define APIC_VECTORS_PER_REG		32
  62
  63static bool lapic_timer_advance_dynamic __read_mostly;
 
 
 
 
 
 
 
 
 
 
  64#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
  65#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
  66#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
  67#define LAPIC_TIMER_ADVANCE_NS_MAX     5000
  68/* step-by-step approximation to mitigate fluctuation */
  69#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  70
  71static inline int apic_test_vector(int vec, void *bitmap)
  72{
  73	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
  74}
  75
  76bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
  77{
  78	struct kvm_lapic *apic = vcpu->arch.apic;
  79
  80	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
  81		apic_test_vector(vector, apic->regs + APIC_IRR);
  82}
  83
  84static inline int __apic_test_and_set_vector(int vec, void *bitmap)
  85{
  86	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
  87}
  88
  89static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
  90{
  91	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
  92}
  93
 
 
 
  94__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
  95__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
  96
  97static inline int apic_enabled(struct kvm_lapic *apic)
  98{
  99	return kvm_apic_sw_enabled(apic) &&	kvm_apic_hw_enabled(apic);
 100}
 101
 102#define LVT_MASK	\
 103	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
 104
 105#define LINT_MASK	\
 106	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
 107	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 108
 109static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 110{
 111	return apic->vcpu->vcpu_id;
 112}
 113
 114static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
 115{
 116	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
 
 117}
 118
 119bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
 120{
 121	return kvm_x86_ops.set_hv_timer
 122	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
 123		    kvm_can_post_timer_interrupt(vcpu));
 124}
 125EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);
 126
 127static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
 128{
 129	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
 130}
 131
 
 
 
 
 
 132static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
 133		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
 134	switch (map->mode) {
 
 
 
 
 
 135	case KVM_APIC_MODE_X2APIC: {
 136		u32 offset = (dest_id >> 16) * 16;
 137		u32 max_apic_id = map->max_apic_id;
 138
 139		if (offset <= max_apic_id) {
 140			u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 141
 142			offset = array_index_nospec(offset, map->max_apic_id + 1);
 143			*cluster = &map->phys_map[offset];
 144			*mask = dest_id & (0xffff >> (16 - cluster_size));
 145		} else {
 146			*mask = 0;
 147		}
 148
 149		return true;
 150		}
 151	case KVM_APIC_MODE_XAPIC_FLAT:
 152		*cluster = map->xapic_flat_map;
 153		*mask = dest_id & 0xff;
 154		return true;
 155	case KVM_APIC_MODE_XAPIC_CLUSTER:
 156		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
 157		*mask = dest_id & 0xf;
 158		return true;
 
 
 159	default:
 160		/* Not optimized. */
 161		return false;
 162	}
 163}
 164
 165static void kvm_apic_map_free(struct rcu_head *rcu)
 166{
 167	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
 168
 169	kvfree(map);
 170}
 171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 172/*
 173 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 174 *
 175 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 176 * apic_map_lock_held.
 177 */
 178enum {
 179	CLEAN,
 180	UPDATE_IN_PROGRESS,
 181	DIRTY
 182};
 183
 184void kvm_recalculate_apic_map(struct kvm *kvm)
 185{
 186	struct kvm_apic_map *new, *old = NULL;
 187	struct kvm_vcpu *vcpu;
 188	int i;
 189	u32 max_id = 255; /* enough space for any xAPIC ID */
 
 
 190
 191	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
 192	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
 193		return;
 194
 
 
 
 195	mutex_lock(&kvm->arch.apic_map_lock);
 
 
 196	/*
 197	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
 198	 * (if clean) or the APIC registers (if dirty).
 
 
 
 199	 */
 200	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
 201				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
 202		/* Someone else has updated the map. */
 203		mutex_unlock(&kvm->arch.apic_map_lock);
 204		return;
 205	}
 206
 
 
 
 
 
 
 
 
 
 207	kvm_for_each_vcpu(i, vcpu, kvm)
 208		if (kvm_apic_present(vcpu))
 209			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
 210
 211	new = kvzalloc(sizeof(struct kvm_apic_map) +
 212	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
 213			   GFP_KERNEL_ACCOUNT);
 214
 215	if (!new)
 216		goto out;
 217
 218	new->max_apic_id = max_id;
 
 219
 220	kvm_for_each_vcpu(i, vcpu, kvm) {
 221		struct kvm_lapic *apic = vcpu->arch.apic;
 222		struct kvm_lapic **cluster;
 223		u16 mask;
 224		u32 ldr;
 225		u8 xapic_id;
 226		u32 x2apic_id;
 227
 228		if (!kvm_apic_present(vcpu))
 229			continue;
 230
 231		xapic_id = kvm_xapic_id(apic);
 232		x2apic_id = kvm_x2apic_id(apic);
 
 
 
 
 
 
 233
 234		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
 235		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
 236				x2apic_id <= new->max_apic_id)
 237			new->phys_map[x2apic_id] = apic;
 238		/*
 239		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
 240		 * prevent them from masking VCPUs with APIC ID <= 0xff.
 241		 */
 242		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
 243			new->phys_map[xapic_id] = apic;
 244
 245		if (!kvm_apic_sw_enabled(apic))
 246			continue;
 247
 248		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 249
 250		if (apic_x2apic_mode(apic)) {
 251			new->mode |= KVM_APIC_MODE_X2APIC;
 252		} else if (ldr) {
 253			ldr = GET_APIC_LOGICAL_ID(ldr);
 254			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
 255				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
 256			else
 257				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
 258		}
 259
 260		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
 261			continue;
 262
 263		if (mask)
 264			cluster[ffs(mask) - 1] = apic;
 265	}
 266out:
 267	old = rcu_dereference_protected(kvm->arch.apic_map,
 268			lockdep_is_held(&kvm->arch.apic_map_lock));
 269	rcu_assign_pointer(kvm->arch.apic_map, new);
 270	/*
 271	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
 272	 * If another update has come in, leave it DIRTY.
 273	 */
 274	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
 275			       UPDATE_IN_PROGRESS, CLEAN);
 276	mutex_unlock(&kvm->arch.apic_map_lock);
 277
 278	if (old)
 279		call_rcu(&old->rcu, kvm_apic_map_free);
 280
 281	kvm_make_scan_ioapic_request(kvm);
 282}
 283
 284static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 285{
 286	bool enabled = val & APIC_SPIV_APIC_ENABLED;
 287
 288	kvm_lapic_set_reg(apic, APIC_SPIV, val);
 289
 290	if (enabled != apic->sw_enabled) {
 291		apic->sw_enabled = enabled;
 292		if (enabled)
 293			static_branch_slow_dec_deferred(&apic_sw_disabled);
 294		else
 295			static_branch_inc(&apic_sw_disabled.key);
 296
 297		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 298	}
 299
 300	/* Check if there are APF page ready requests pending */
 301	if (enabled)
 302		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
 303}
 304
 305static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 306{
 307	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
 308	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 309}
 310
 311static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 312{
 313	kvm_lapic_set_reg(apic, APIC_LDR, id);
 314	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 315}
 316
 317static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
 318{
 319	kvm_lapic_set_reg(apic, APIC_DFR, val);
 320	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 321}
 322
 323static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
 324{
 325	return ((id >> 4) << 16) | (1 << (id & 0xf));
 326}
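
/*
 * Editor's sketch (not part of the kernel source): worked examples of the
 * LDR derivation above.  An x2APIC ID maps to cluster (id >> 4) with the
 * single bit (id & 0xf) set in the low 16 bits of the LDR.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t calc_x2apic_ldr(uint32_t id)
{
	return ((id >> 4) << 16) | (1u << (id & 0xf));
}

int main(void)
{
	assert(calc_x2apic_ldr(0)  == 0x00000001);	/* cluster 0, bit 0 */
	assert(calc_x2apic_ldr(5)  == 0x00000020);	/* cluster 0, bit 5 */
	assert(calc_x2apic_ldr(35) == 0x00020008);	/* cluster 2, bit 3 */
	return 0;
}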
 327
 328static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 329{
 330	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 331
 332	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
 333
 334	kvm_lapic_set_reg(apic, APIC_ID, id);
 335	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
 336	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 337}
 338
 339static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
 340{
 341	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
 342}
 343
 344static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
 345{
 346	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
 347}
 348
 349static inline int apic_lvtt_period(struct kvm_lapic *apic)
 350{
 351	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
 352}
 353
 354static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
 355{
 356	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
 357}
 358
 359static inline int apic_lvt_nmi_mode(u32 lvt_val)
 360{
 361	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
 362}
 363
 364void kvm_apic_set_version(struct kvm_vcpu *vcpu)
 365{
 366	struct kvm_lapic *apic = vcpu->arch.apic;
 367	u32 v = APIC_VERSION;
 368
 369	if (!lapic_in_kernel(vcpu))
 370		return;
 371
 372	/*
  373	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
  374	 * implementation), which doesn't have an EOI register.  Some buggy OSes
  375	 * (e.g. Windows with the Hyper-V role) disable EOI broadcast in the
  376	 * LAPIC without checking the IOAPIC version first, so level-triggered
  377	 * interrupts would never get EOIed in the IOAPIC.
 378	 */
 379	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
 380	    !ioapic_in_kernel(vcpu->kvm))
 381		v |= APIC_LVR_DIRECTED_EOI;
 382	kvm_lapic_set_reg(apic, APIC_LVR, v);
 383}
 384
 385static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
  386	LVT_MASK,		/* part LVTT mask, timer mode mask added at runtime */
 387	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
 388	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
 389	LINT_MASK, LINT_MASK,	/* LVT0-1 */
 390	LVT_MASK		/* LVTERR */
 391};
 392
 393static int find_highest_vector(void *bitmap)
 394{
 395	int vec;
 396	u32 *reg;
 397
 398	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
 399	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
 400		reg = bitmap + REG_POS(vec);
 401		if (*reg)
 402			return __fls(*reg) + vec;
 403	}
 404
 405	return -1;
 406}
 407
 408static u8 count_vectors(void *bitmap)
 409{
 410	int vec;
 411	u32 *reg;
 412	u8 count = 0;
 413
 414	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
 415		reg = bitmap + REG_POS(vec);
 416		count += hweight32(*reg);
 417	}
 418
 419	return count;
 420}
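
/*
 * Editor's sketch (not part of the kernel source): the two scans above over
 * a 256-vector bitmap, reimplemented over a plain array of eight u32s (in
 * the real layout the words sit 0x10 bytes apart, see REG_POS).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bitmap[8] = { 0 };
	int word, highest = -1, count = 0;

	bitmap[1] = 1u << 7;	/* vector 32 + 7  = 39  */
	bitmap[4] = 1u << 2;	/* vector 128 + 2 = 130 */

	/* Highest pending vector: scan words from the top. */
	for (word = 7; word >= 0; word--) {
		if (bitmap[word]) {
			highest = 31 - __builtin_clz(bitmap[word]) + word * 32;
			break;
		}
	}
	/* Population count over all words. */
	for (word = 0; word < 8; word++)
		count += __builtin_popcount(bitmap[word]);

	printf("highest = %d, count = %d\n", highest, count);	/* 130, 2 */
	return 0;
}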
 421
 422bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
 423{
 424	u32 i, vec;
 425	u32 pir_val, irr_val, prev_irr_val;
 426	int max_updated_irr;
 427
 428	max_updated_irr = -1;
 429	*max_irr = -1;
 430
 431	for (i = vec = 0; i <= 7; i++, vec += 32) {
 432		pir_val = READ_ONCE(pir[i]);
 433		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
 434		if (pir_val) {
 435			prev_irr_val = irr_val;
 436			irr_val |= xchg(&pir[i], 0);
 437			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
 438			if (prev_irr_val != irr_val) {
 439				max_updated_irr =
 440					__fls(irr_val ^ prev_irr_val) + vec;
 441			}
 442		}
 443		if (irr_val)
 444			*max_irr = __fls(irr_val) + vec;
 445	}
 446
 447	return ((max_updated_irr != -1) &&
 448		(max_updated_irr == *max_irr));
 449}
 450EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
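
/*
 * Editor's sketch (not part of the kernel source): the PIR->IRR merge above
 * for a single 32-bit word, with a plain read-and-clear standing in for the
 * atomic xchg() the kernel needs against concurrent posters.  The vector
 * numbers are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pir = 0x00000880;	/* bits 7 and 11 posted */
	uint32_t irr = 0x00000080;	/* bit 7 already pending */
	uint32_t prev = irr;

	irr |= pir;			/* merge the posted bits */
	pir = 0;			/* consume them */

	if (prev != irr)
		printf("max updated bit in word: %d\n",
		       31 - __builtin_clz(prev ^ irr));	/* 11 */
	printf("max pending bit in word: %d\n",
	       31 - __builtin_clz(irr));		/* 11 */
	return 0;
}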
 451
 452bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
 453{
 454	struct kvm_lapic *apic = vcpu->arch.apic;
 455
 456	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
 457}
 458EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 459
 460static inline int apic_search_irr(struct kvm_lapic *apic)
 461{
 462	return find_highest_vector(apic->regs + APIC_IRR);
 463}
 464
 465static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 466{
 467	int result;
 468
 469	/*
  470	 * Note that irr_pending is just a hint.  It will always be
  471	 * true with virtual interrupt delivery enabled.
 472	 */
 473	if (!apic->irr_pending)
 474		return -1;
 475
 476	result = apic_search_irr(apic);
 477	ASSERT(result == -1 || result >= 16);
 478
 479	return result;
 480}
 481
 482static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 483{
 484	struct kvm_vcpu *vcpu;
 485
 486	vcpu = apic->vcpu;
 487
 488	if (unlikely(vcpu->arch.apicv_active)) {
 489		/* need to update RVI */
 490		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
 491		static_call(kvm_x86_hwapic_irr_update)(vcpu,
 492				apic_find_highest_irr(apic));
 493	} else {
 494		apic->irr_pending = false;
 495		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
 496		if (apic_search_irr(apic) != -1)
 497			apic->irr_pending = true;
 498	}
 499}
 500
 501void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
 502{
 503	apic_clear_irr(vec, vcpu->arch.apic);
 504}
 505EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
 506
 507static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 508{
 509	struct kvm_vcpu *vcpu;
 510
 511	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
 512		return;
 513
 514	vcpu = apic->vcpu;
 515
 516	/*
 517	 * With APIC virtualization enabled, all caching is disabled
 518	 * because the processor can modify ISR under the hood.  Instead
 519	 * just set SVI.
 520	 */
 521	if (unlikely(vcpu->arch.apicv_active))
 522		static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
 523	else {
 524		++apic->isr_count;
 525		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
 526		/*
  527		 * An ISR (in-service register) bit is set when an interrupt is
  528		 * injected, and only the highest-priority vector is injected.
  529		 * Thus the most recently set bit matches the highest bit in ISR.
 530		 */
 531		apic->highest_isr_cache = vec;
 532	}
 533}
 534
 535static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 536{
 537	int result;
 538
 539	/*
 540	 * Note that isr_count is always 1, and highest_isr_cache
 541	 * is always -1, with APIC virtualization enabled.
 542	 */
 543	if (!apic->isr_count)
 544		return -1;
 545	if (likely(apic->highest_isr_cache != -1))
 546		return apic->highest_isr_cache;
 547
 548	result = find_highest_vector(apic->regs + APIC_ISR);
 549	ASSERT(result == -1 || result >= 16);
 550
 551	return result;
 552}
 553
 554static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 555{
 556	struct kvm_vcpu *vcpu;
 557	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
 558		return;
 559
 560	vcpu = apic->vcpu;
 561
 562	/*
 563	 * We do get here for APIC virtualization enabled if the guest
 564	 * uses the Hyper-V APIC enlightenment.  In this case we may need
 565	 * to trigger a new interrupt delivery by writing the SVI field;
 566	 * on the other hand isr_count and highest_isr_cache are unused
 567	 * and must be left alone.
 568	 */
 569	if (unlikely(vcpu->arch.apicv_active))
 570		static_call(kvm_x86_hwapic_isr_update)(vcpu,
 571						apic_find_highest_isr(apic));
 572	else {
 573		--apic->isr_count;
 574		BUG_ON(apic->isr_count < 0);
 575		apic->highest_isr_cache = -1;
 576	}
 577}
 578
 579int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 580{
  581	/* This may race with the setting of IRR in __apic_accept_irq() and the
  582	 * value returned may be stale, but kvm_vcpu_kick() in __apic_accept_irq
 583	 * will cause vmexit immediately and the value will be recalculated
 584	 * on the next vmentry.
 585	 */
 586	return apic_find_highest_irr(vcpu->arch.apic);
 587}
 588EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 589
 590static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 591			     int vector, int level, int trig_mode,
 592			     struct dest_map *dest_map);
 593
 594int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 595		     struct dest_map *dest_map)
 596{
 597	struct kvm_lapic *apic = vcpu->arch.apic;
 598
 599	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
 600			irq->level, irq->trig_mode, dest_map);
 601}
 602
 603static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
 604			 struct kvm_lapic_irq *irq, u32 min)
 605{
 606	int i, count = 0;
 607	struct kvm_vcpu *vcpu;
 608
 609	if (min > map->max_apic_id)
 610		return 0;
 611
 612	for_each_set_bit(i, ipi_bitmap,
 613		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
 614		if (map->phys_map[min + i]) {
 615			vcpu = map->phys_map[min + i]->vcpu;
 616			count += kvm_apic_set_irq(vcpu, irq, NULL);
 617		}
 618	}
 619
 620	return count;
 621}
 622
 623int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 624		    unsigned long ipi_bitmap_high, u32 min,
 625		    unsigned long icr, int op_64_bit)
 626{
 627	struct kvm_apic_map *map;
 628	struct kvm_lapic_irq irq = {0};
 629	int cluster_size = op_64_bit ? 64 : 32;
 630	int count;
 631
 632	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
 633		return -KVM_EINVAL;
 634
 635	irq.vector = icr & APIC_VECTOR_MASK;
 636	irq.delivery_mode = icr & APIC_MODE_MASK;
 637	irq.level = (icr & APIC_INT_ASSERT) != 0;
 638	irq.trig_mode = icr & APIC_INT_LEVELTRIG;
 639
 640	rcu_read_lock();
 641	map = rcu_dereference(kvm->arch.apic_map);
 642
 643	count = -EOPNOTSUPP;
 644	if (likely(map)) {
 645		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
 646		min += cluster_size;
 647		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
 648	}
 649
 650	rcu_read_unlock();
 651	return count;
 652}
 653
 654static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 655{
 656
 657	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
 658				      sizeof(val));
 659}
 660
 661static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 662{
 663
 664	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
 665				      sizeof(*val));
 666}
 667
 668static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
 669{
 670	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
 671}
 672
 673static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
 674{
 675	u8 val;
 676	if (pv_eoi_get_user(vcpu, &val) < 0) {
 677		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
 678			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
 679		return false;
 680	}
 681	return val & KVM_PV_EOI_ENABLED;
 682}
 683
 684static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
 685{
 686	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
 687		printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
 688			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
 689		return;
 690	}
 691	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 692}
 693
 694static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 695{
 696	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
 697		printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
 698			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
 699		return;
 700	}
 701	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 702}
 703
 704static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 705{
 706	int highest_irr;
 707	if (apic->vcpu->arch.apicv_active)
 708		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
 709	else
 710		highest_irr = apic_find_highest_irr(apic);
 711	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
 712		return -1;
 713	return highest_irr;
 714}
 715
 716static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
 717{
 718	u32 tpr, isrv, ppr, old_ppr;
 719	int isr;
 720
 721	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
 722	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
 723	isr = apic_find_highest_isr(apic);
 724	isrv = (isr != -1) ? isr : 0;
 725
 726	if ((tpr & 0xf0) >= (isrv & 0xf0))
 727		ppr = tpr & 0xff;
 728	else
 729		ppr = isrv & 0xf0;
 730
 731	*new_ppr = ppr;
 732	if (old_ppr != ppr)
 733		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
 734
 735	return ppr < old_ppr;
 736}
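
/*
 * Editor's sketch (not part of the kernel source): the PPR rule above.
 * PPR is TPR if its priority class (bits 7:4) is >= that of the highest
 * in-service vector, else the in-service class with the sub-class zeroed.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t calc_ppr(uint32_t tpr, uint32_t isrv)
{
	return ((tpr & 0xf0) >= (isrv & 0xf0)) ? (tpr & 0xff) : (isrv & 0xf0);
}

int main(void)
{
	printf("0x%02x\n", calc_ppr(0x45, 0x31));	/* TPR wins: 0x45 */
	printf("0x%02x\n", calc_ppr(0x25, 0x83));	/* ISR wins: 0x80 */
	return 0;
}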
 737
 738static void apic_update_ppr(struct kvm_lapic *apic)
 739{
 740	u32 ppr;
 741
 742	if (__apic_update_ppr(apic, &ppr) &&
 743	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
 744		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 745}
 746
 747void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
 748{
 749	apic_update_ppr(vcpu->arch.apic);
 750}
 751EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
 752
 753static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 754{
 755	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
 756	apic_update_ppr(apic);
 757}
 758
 759static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
 760{
 761	return mda == (apic_x2apic_mode(apic) ?
 762			X2APIC_BROADCAST : APIC_BROADCAST);
 763}
 764
 765static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
 766{
 767	if (kvm_apic_broadcast(apic, mda))
 768		return true;
 769
 770	if (apic_x2apic_mode(apic))
 771		return mda == kvm_x2apic_id(apic);
 772
 773	/*
 774	 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
 775	 * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
 776	 * this allows unique addressing of VCPUs with APIC ID over 0xff.
  777	 * The 0xff check is needed because the xAPIC ID is writeable.
 778	 */
 779	if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
 780		return true;
 781
 782	return mda == kvm_xapic_id(apic);
 783}
 784
 785static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
 786{
 787	u32 logical_id;
 788
 789	if (kvm_apic_broadcast(apic, mda))
 790		return true;
 791
 792	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
 793
 794	if (apic_x2apic_mode(apic))
 795		return ((logical_id >> 16) == (mda >> 16))
 796		       && (logical_id & mda & 0xffff) != 0;
 797
 798	logical_id = GET_APIC_LOGICAL_ID(logical_id);
 799
 800	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
 801	case APIC_DFR_FLAT:
 802		return (logical_id & mda) != 0;
 803	case APIC_DFR_CLUSTER:
 804		return ((logical_id >> 4) == (mda >> 4))
 805		       && (logical_id & mda & 0xf) != 0;
 806	default:
 807		return false;
 808	}
 809}
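
/*
 * Editor's sketch (not part of the kernel source): the xAPIC logical match
 * above.  Flat mode is a simple bitwise AND of the 8-bit LDR with the MDA;
 * cluster mode additionally requires the cluster nibbles to be equal.
 */
#include <stdbool.h>
#include <stdio.h>

static bool match_flat(unsigned ldr, unsigned mda)
{
	return (ldr & mda) != 0;
}

static bool match_cluster(unsigned ldr, unsigned mda)
{
	return ((ldr >> 4) == (mda >> 4)) && (ldr & mda & 0xf) != 0;
}

int main(void)
{
	printf("%d\n", match_flat(0x05, 0x04));		/* 1 */
	printf("%d\n", match_cluster(0x23, 0x21));	/* 1: same cluster 2 */
	printf("%d\n", match_cluster(0x23, 0x31));	/* 0: cluster differs */
	return 0;
}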
 810
 811/* The KVM local APIC implementation has two quirks:
 812 *
 813 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 814 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 815 *    KVM doesn't do that aliasing.
 816 *
 817 *  - in-kernel IOAPIC messages have to be delivered directly to
 818 *    x2APIC, because the kernel does not support interrupt remapping.
 819 *    In order to support broadcast without interrupt remapping, x2APIC
 820 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 821 *    to X2APIC_BROADCAST.
 822 *
 823 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 824 * important when userspace wants to use x2APIC-format MSIs, because
 825 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 826 */
 827static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
 828		struct kvm_lapic *source, struct kvm_lapic *target)
 829{
 830	bool ipi = source != NULL;
 831
 832	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
 833	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
 834		return X2APIC_BROADCAST;
 835
 836	return dest_id;
 837}
 838
 839bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 840			   int shorthand, unsigned int dest, int dest_mode)
 841{
 842	struct kvm_lapic *target = vcpu->arch.apic;
 843	u32 mda = kvm_apic_mda(vcpu, dest, source, target);
 844
 845	ASSERT(target);
 846	switch (shorthand) {
 847	case APIC_DEST_NOSHORT:
 848		if (dest_mode == APIC_DEST_PHYSICAL)
 849			return kvm_apic_match_physical_addr(target, mda);
 850		else
 851			return kvm_apic_match_logical_addr(target, mda);
 852	case APIC_DEST_SELF:
 853		return target == source;
 854	case APIC_DEST_ALLINC:
 855		return true;
 856	case APIC_DEST_ALLBUT:
 857		return target != source;
 858	default:
 859		return false;
 860	}
 861}
 862EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
 863
 864int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
 865		       const unsigned long *bitmap, u32 bitmap_size)
 866{
 867	u32 mod;
 868	int i, idx = -1;
 869
 870	mod = vector % dest_vcpus;
 871
 872	for (i = 0; i <= mod; i++) {
 873		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
 874		BUG_ON(idx == bitmap_size);
 875	}
 876
 877	return idx;
 878}
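
/*
 * Editor's sketch (not part of the kernel source): the vector-hashing pick
 * above selects the (vector % dest_vcpus)-th set bit of the destination
 * bitmap, here over a plain unsigned long with hypothetical values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long bitmap = 0x2c;	/* candidate destinations 2, 3, 5 */
	int dest_vcpus = 3, vector = 0x31;
	int i, idx = -1, mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx++;
		while (!(bitmap & (1ul << idx)))	/* find_next_bit() */
			idx++;
	}
	printf("vector 0x%x -> destination %d\n", vector, idx);	/* 3 */
	return 0;
}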
 879
 880static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
 881{
 882	if (!kvm->arch.disabled_lapic_found) {
 883		kvm->arch.disabled_lapic_found = true;
 884		printk(KERN_INFO
 885		       "Disabled LAPIC found during irq injection\n");
 886	}
 887}
 888
 889static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
 890		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
 891{
 892	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
 893		if ((irq->dest_id == APIC_BROADCAST &&
 894				map->mode != KVM_APIC_MODE_X2APIC))
 895			return true;
 896		if (irq->dest_id == X2APIC_BROADCAST)
 897			return true;
 898	} else {
 899		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
 900		if (irq->dest_id == (x2apic_ipi ?
 901		                     X2APIC_BROADCAST : APIC_BROADCAST))
 902			return true;
 903	}
 904
 905	return false;
 906}
 907
 908/* Return true if the interrupt can be handled by using *bitmap as index mask
 909 * for valid destinations in *dst array.
 910 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 911 * Note: we may have zero kvm_lapic destinations when we return true, which
 912 * means that the interrupt should be dropped.  In this case, *bitmap would be
 913 * zero and *dst undefined.
 914 */
 915static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
 916		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
 917		struct kvm_apic_map *map, struct kvm_lapic ***dst,
 918		unsigned long *bitmap)
 919{
 920	int i, lowest;
 921
 922	if (irq->shorthand == APIC_DEST_SELF && src) {
 923		*dst = src;
 924		*bitmap = 1;
 925		return true;
 926	} else if (irq->shorthand)
 927		return false;
 928
 929	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
 930		return false;
 931
 932	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
 933		if (irq->dest_id > map->max_apic_id) {
 934			*bitmap = 0;
 935		} else {
 936			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
 937			*dst = &map->phys_map[dest_id];
 938			*bitmap = 1;
 939		}
 940		return true;
 941	}
 942
 943	*bitmap = 0;
 944	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
 945				(u16 *)bitmap))
 946		return false;
 947
 948	if (!kvm_lowest_prio_delivery(irq))
 949		return true;
 950
 951	if (!kvm_vector_hashing_enabled()) {
 952		lowest = -1;
 953		for_each_set_bit(i, bitmap, 16) {
 954			if (!(*dst)[i])
 955				continue;
 956			if (lowest < 0)
 957				lowest = i;
 958			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
 959						(*dst)[lowest]->vcpu) < 0)
 960				lowest = i;
 961		}
 962	} else {
 963		if (!*bitmap)
 964			return true;
 965
 966		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
 967				bitmap, 16);
 968
 969		if (!(*dst)[lowest]) {
 970			kvm_apic_disabled_lapic_found(kvm);
 971			*bitmap = 0;
 972			return true;
 973		}
 974	}
 975
 976	*bitmap = (lowest >= 0) ? 1 << lowest : 0;
 977
 978	return true;
 979}
 980
 981bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 982		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
 983{
 984	struct kvm_apic_map *map;
 985	unsigned long bitmap;
 986	struct kvm_lapic **dst = NULL;
 987	int i;
 988	bool ret;
 989
 990	*r = -1;
 991
 992	if (irq->shorthand == APIC_DEST_SELF) {
 993		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
 994		return true;
 995	}
 996
 997	rcu_read_lock();
 998	map = rcu_dereference(kvm->arch.apic_map);
 999
1000	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1001	if (ret) {
1002		*r = 0;
1003		for_each_set_bit(i, &bitmap, 16) {
1004			if (!dst[i])
1005				continue;
1006			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1007		}
1008	}
1009
1010	rcu_read_unlock();
1011	return ret;
1012}
1013
1014/*
 1015 * This routine tries to handle interrupts in posted mode; here is how
 1016 * it deals with the different cases:
1017 * - For single-destination interrupts, handle it in posted mode
1018 * - Else if vector hashing is enabled and it is a lowest-priority
1019 *   interrupt, handle it in posted mode and use the following mechanism
1020 *   to find the destination vCPU.
1021 *	1. For lowest-priority interrupts, store all the possible
1022 *	   destination vCPUs in an array.
1023 *	2. Use "guest vector % max number of destination vCPUs" to find
1024 *	   the right destination vCPU in the array for the lowest-priority
1025 *	   interrupt.
1026 * - Otherwise, use remapped mode to inject the interrupt.
1027 */
1028bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
1029			struct kvm_vcpu **dest_vcpu)
1030{
1031	struct kvm_apic_map *map;
1032	unsigned long bitmap;
1033	struct kvm_lapic **dst = NULL;
1034	bool ret = false;
1035
1036	if (irq->shorthand)
1037		return false;
1038
1039	rcu_read_lock();
1040	map = rcu_dereference(kvm->arch.apic_map);
1041
1042	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1043			hweight16(bitmap) == 1) {
1044		unsigned long i = find_first_bit(&bitmap, 16);
1045
1046		if (dst[i]) {
1047			*dest_vcpu = dst[i]->vcpu;
1048			ret = true;
1049		}
1050	}
1051
1052	rcu_read_unlock();
1053	return ret;
1054}
1055
1056/*
1057 * Add a pending IRQ into lapic.
1058 * Return 1 if successfully added and 0 if discarded.
1059 */
1060static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1061			     int vector, int level, int trig_mode,
1062			     struct dest_map *dest_map)
1063{
1064	int result = 0;
1065	struct kvm_vcpu *vcpu = apic->vcpu;
1066
1067	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1068				  trig_mode, vector);
1069	switch (delivery_mode) {
1070	case APIC_DM_LOWEST:
1071		vcpu->arch.apic_arb_prio++;
1072		fallthrough;
1073	case APIC_DM_FIXED:
1074		if (unlikely(trig_mode && !level))
1075			break;
1076
1077		/* FIXME add logic for vcpu on reset */
1078		if (unlikely(!apic_enabled(apic)))
1079			break;
1080
1081		result = 1;
1082
1083		if (dest_map) {
1084			__set_bit(vcpu->vcpu_id, dest_map->map);
1085			dest_map->vectors[vcpu->vcpu_id] = vector;
1086		}
1087
1088		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1089			if (trig_mode)
1090				kvm_lapic_set_vector(vector,
1091						     apic->regs + APIC_TMR);
1092			else
1093				kvm_lapic_clear_vector(vector,
1094						       apic->regs + APIC_TMR);
1095		}
1096
1097		if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
1098			kvm_lapic_set_irr(vector, apic);
1099			kvm_make_request(KVM_REQ_EVENT, vcpu);
1100			kvm_vcpu_kick(vcpu);
1101		}
1102		break;
1103
1104	case APIC_DM_REMRD:
1105		result = 1;
1106		vcpu->arch.pv.pv_unhalted = 1;
1107		kvm_make_request(KVM_REQ_EVENT, vcpu);
1108		kvm_vcpu_kick(vcpu);
1109		break;
1110
1111	case APIC_DM_SMI:
1112		result = 1;
1113		kvm_make_request(KVM_REQ_SMI, vcpu);
1114		kvm_vcpu_kick(vcpu);
1115		break;
1116
1117	case APIC_DM_NMI:
1118		result = 1;
1119		kvm_inject_nmi(vcpu);
1120		kvm_vcpu_kick(vcpu);
1121		break;
1122
1123	case APIC_DM_INIT:
1124		if (!trig_mode || level) {
1125			result = 1;
1126			/* assumes that there are only KVM_APIC_INIT/SIPI */
1127			apic->pending_events = (1UL << KVM_APIC_INIT);
1128			kvm_make_request(KVM_REQ_EVENT, vcpu);
1129			kvm_vcpu_kick(vcpu);
1130		}
1131		break;
1132
1133	case APIC_DM_STARTUP:
1134		result = 1;
1135		apic->sipi_vector = vector;
1136		/* make sure sipi_vector is visible for the receiver */
1137		smp_wmb();
1138		set_bit(KVM_APIC_SIPI, &apic->pending_events);
1139		kvm_make_request(KVM_REQ_EVENT, vcpu);
1140		kvm_vcpu_kick(vcpu);
1141		break;
1142
1143	case APIC_DM_EXTINT:
1144		/*
1145		 * Should only be called by kvm_apic_local_deliver() with LVT0,
1146		 * before NMI watchdog was enabled. Already handled by
1147		 * kvm_apic_accept_pic_intr().
1148		 */
1149		break;
1150
1151	default:
1152		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1153		       delivery_mode);
1154		break;
1155	}
1156	return result;
1157}
1158
1159/*
 1160 * This routine identifies the mask of destination vCPUs meant to receive
 1161 * an IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to
 1162 * find the destination vCPU array and set the bitmap, or it traverses
 1163 * each available vCPU to identify them.
1164 */
1165void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
1166			      unsigned long *vcpu_bitmap)
1167{
1168	struct kvm_lapic **dest_vcpu = NULL;
1169	struct kvm_lapic *src = NULL;
1170	struct kvm_apic_map *map;
1171	struct kvm_vcpu *vcpu;
1172	unsigned long bitmap;
1173	int i, vcpu_idx;
1174	bool ret;
1175
1176	rcu_read_lock();
1177	map = rcu_dereference(kvm->arch.apic_map);
1178
1179	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1180					  &bitmap);
1181	if (ret) {
1182		for_each_set_bit(i, &bitmap, 16) {
1183			if (!dest_vcpu[i])
1184				continue;
1185			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1186			__set_bit(vcpu_idx, vcpu_bitmap);
1187		}
1188	} else {
1189		kvm_for_each_vcpu(i, vcpu, kvm) {
1190			if (!kvm_apic_present(vcpu))
1191				continue;
1192			if (!kvm_apic_match_dest(vcpu, NULL,
1193						 irq->shorthand,
1194						 irq->dest_id,
1195						 irq->dest_mode))
1196				continue;
1197			__set_bit(i, vcpu_bitmap);
1198		}
1199	}
1200	rcu_read_unlock();
1201}
1202
1203int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1204{
1205	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1206}
1207
1208static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1209{
1210	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1211}
1212
1213static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1214{
1215	int trigger_mode;
1216
 1217	/* EOI the IOAPIC only if the IOAPIC actually handles the vector. */
1218	if (!kvm_ioapic_handles_vector(apic, vector))
1219		return;
1220
1221	/* Request a KVM exit to inform the userspace IOAPIC. */
1222	if (irqchip_split(apic->vcpu->kvm)) {
1223		apic->vcpu->arch.pending_ioapic_eoi = vector;
1224		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1225		return;
1226	}
1227
1228	if (apic_test_vector(vector, apic->regs + APIC_TMR))
1229		trigger_mode = IOAPIC_LEVEL_TRIG;
1230	else
1231		trigger_mode = IOAPIC_EDGE_TRIG;
1232
1233	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1234}
1235
1236static int apic_set_eoi(struct kvm_lapic *apic)
1237{
1238	int vector = apic_find_highest_isr(apic);
1239
1240	trace_kvm_eoi(apic, vector);
1241
1242	/*
 1243	 * Not every EOI write has a corresponding ISR bit; one example is
 1244	 * when the kernel checks the timer in setup_IO_APIC().
1245	 */
1246	if (vector == -1)
1247		return vector;
1248
1249	apic_clear_isr(vector, apic);
1250	apic_update_ppr(apic);
1251
1252	if (to_hv_vcpu(apic->vcpu) &&
1253	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
1254		kvm_hv_synic_send_eoi(apic->vcpu, vector);
1255
1256	kvm_ioapic_send_eoi(apic, vector);
1257	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1258	return vector;
1259}
1260
1261/*
 1262 * This interface assumes a trap-like exit, which has already finished the
 1263 * desired side effects, including the vISR and vPPR updates.
1264 */
1265void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1266{
1267	struct kvm_lapic *apic = vcpu->arch.apic;
1268
1269	trace_kvm_eoi(apic, vector);
1270
1271	kvm_ioapic_send_eoi(apic, vector);
1272	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1273}
1274EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1275
1276void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1277{
1278	struct kvm_lapic_irq irq;
1279
1280	irq.vector = icr_low & APIC_VECTOR_MASK;
1281	irq.delivery_mode = icr_low & APIC_MODE_MASK;
1282	irq.dest_mode = icr_low & APIC_DEST_MASK;
1283	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1284	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1285	irq.shorthand = icr_low & APIC_SHORT_MASK;
1286	irq.msi_redir_hint = false;
1287	if (apic_x2apic_mode(apic))
1288		irq.dest_id = icr_high;
1289	else
1290		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
1291
1292	trace_kvm_apic_ipi(icr_low, irq.dest_id);
1293
1294	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1295}
1296
1297static u32 apic_get_tmcct(struct kvm_lapic *apic)
1298{
1299	ktime_t remaining, now;
1300	s64 ns;
1301	u32 tmcct;
1302
1303	ASSERT(apic != NULL);
1304
1305	/* if initial count is 0, current count should also be 0 */
1306	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1307		apic->lapic_timer.period == 0)
1308		return 0;
1309
1310	now = ktime_get();
1311	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1312	if (ktime_to_ns(remaining) < 0)
1313		remaining = 0;
1314
1315	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1316	tmcct = div64_u64(ns,
1317			 (APIC_BUS_CYCLE_NS * apic->divide_count));
1318
1319	return tmcct;
1320}
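
/*
 * Editor's sketch (not part of the kernel source): the current-count
 * computation above.  The remaining time is folded into the current period
 * with mod_64() and converted back to bus cycles; the constants below are
 * hypothetical, assuming a 1 ns APIC_BUS_CYCLE_NS.
 */
#include <stdio.h>
#include <stdint.h>

#define BUS_CYCLE_NS 1		/* assumed APIC bus cycle length */

int main(void)
{
	uint64_t period_ns = 1000000;		/* 1 ms periodic timer */
	uint64_t remaining_ns = 2300000;	/* e.g. after missed wakeups */
	uint32_t divide_count = 4;

	uint64_t ns = remaining_ns % period_ns;	/* mod_64() */
	uint32_t tmcct = ns / (BUS_CYCLE_NS * divide_count);

	printf("TMCCT = %u\n", tmcct);		/* 75000 */
	return 0;
}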
1321
1322static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1323{
1324	struct kvm_vcpu *vcpu = apic->vcpu;
1325	struct kvm_run *run = vcpu->run;
1326
1327	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1328	run->tpr_access.rip = kvm_rip_read(vcpu);
1329	run->tpr_access.is_write = write;
1330}
1331
1332static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1333{
1334	if (apic->vcpu->arch.tpr_access_reporting)
1335		__report_tpr_access(apic, write);
1336}
1337
1338static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1339{
1340	u32 val = 0;
1341
1342	if (offset >= LAPIC_MMIO_LENGTH)
1343		return 0;
1344
1345	switch (offset) {
1346	case APIC_ARBPRI:
1347		break;
1348
1349	case APIC_TMCCT:	/* Timer CCR */
1350		if (apic_lvtt_tscdeadline(apic))
1351			return 0;
1352
1353		val = apic_get_tmcct(apic);
1354		break;
1355	case APIC_PROCPRI:
1356		apic_update_ppr(apic);
1357		val = kvm_lapic_get_reg(apic, offset);
1358		break;
1359	case APIC_TASKPRI:
1360		report_tpr_access(apic, false);
1361		fallthrough;
1362	default:
1363		val = kvm_lapic_get_reg(apic, offset);
1364		break;
1365	}
1366
1367	return val;
1368}
1369
1370static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1371{
1372	return container_of(dev, struct kvm_lapic, dev);
1373}
1374
1375#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
1376#define APIC_REGS_MASK(first, count) \
1377	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
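
/*
 * Editor's sketch (not part of the kernel source): the register-mask
 * encoding above gives every 16-byte-aligned APIC register one bit in a
 * 64-bit mask (offset 0x00 -> bit 0, 0x20 -> bit 2, ...), so validity
 * checks reduce to a single AND.
 */
#include <stdio.h>
#include <stdint.h>

#define REG_MASK(reg)		(1ull << ((reg) >> 4))
#define REGS_MASK(first, count)	(REG_MASK(first) * ((1ull << (count)) - 1))

int main(void)
{
	/* IRR occupies eight registers starting at offset 0x200. */
	uint64_t mask = REGS_MASK(0x200, 8);

	printf("IRR mask: 0x%016llx\n", (unsigned long long)mask);
	printf("0x230 valid? %d\n", !!(mask & REG_MASK(0x230)));	/* 1 */
	printf("0x280 valid? %d\n", !!(mask & REG_MASK(0x280)));	/* 0 */
	return 0;
}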
1378
1379int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1380		void *data)
1381{
1382	unsigned char alignment = offset & 0xf;
1383	u32 result;
1384	/* this bitmask has a bit cleared for each reserved register */
1385	u64 valid_reg_mask =
1386		APIC_REG_MASK(APIC_ID) |
1387		APIC_REG_MASK(APIC_LVR) |
1388		APIC_REG_MASK(APIC_TASKPRI) |
1389		APIC_REG_MASK(APIC_PROCPRI) |
1390		APIC_REG_MASK(APIC_LDR) |
1391		APIC_REG_MASK(APIC_DFR) |
1392		APIC_REG_MASK(APIC_SPIV) |
1393		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1394		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1395		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1396		APIC_REG_MASK(APIC_ESR) |
1397		APIC_REG_MASK(APIC_ICR) |
1398		APIC_REG_MASK(APIC_ICR2) |
1399		APIC_REG_MASK(APIC_LVTT) |
1400		APIC_REG_MASK(APIC_LVTTHMR) |
1401		APIC_REG_MASK(APIC_LVTPC) |
1402		APIC_REG_MASK(APIC_LVT0) |
1403		APIC_REG_MASK(APIC_LVT1) |
1404		APIC_REG_MASK(APIC_LVTERR) |
1405		APIC_REG_MASK(APIC_TMICT) |
1406		APIC_REG_MASK(APIC_TMCCT) |
1407		APIC_REG_MASK(APIC_TDCR);
1408
1409	/* ARBPRI is not valid on x2APIC */
1410	if (!apic_x2apic_mode(apic))
1411		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
1412
1413	if (alignment + len > 4)
1414		return 1;
1415
1416	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
1417		return 1;
1418
1419	result = __apic_read(apic, offset & ~0xf);
1420
1421	trace_kvm_apic_read(offset, result);
1422
1423	switch (len) {
1424	case 1:
1425	case 2:
1426	case 4:
1427		memcpy(data, (char *)&result + alignment, len);
1428		break;
1429	default:
 1430		printk(KERN_ERR "Local APIC read with len = %x, "
 1431		       "should be 1, 2, or 4 instead\n", len);
1432		break;
1433	}
1434	return 0;
1435}
1436EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1437
1438static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1439{
1440	return addr >= apic->base_address &&
1441		addr < apic->base_address + LAPIC_MMIO_LENGTH;
1442}
1443
1444static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1445			   gpa_t address, int len, void *data)
1446{
1447	struct kvm_lapic *apic = to_lapic(this);
1448	u32 offset = address - apic->base_address;
1449
1450	if (!apic_mmio_in_range(apic, address))
1451		return -EOPNOTSUPP;
1452
1453	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1454		if (!kvm_check_has_quirk(vcpu->kvm,
1455					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1456			return -EOPNOTSUPP;
1457
1458		memset(data, 0xff, len);
1459		return 0;
1460	}
1461
1462	kvm_lapic_reg_read(apic, offset, len, data);
1463
1464	return 0;
1465}
1466
1467static void update_divide_count(struct kvm_lapic *apic)
1468{
1469	u32 tmp1, tmp2, tdcr;
1470
1471	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1472	tmp1 = tdcr & 0xf;
1473	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1474	apic->divide_count = 0x1 << (tmp2 & 0x7);
1475}
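
/*
 * Editor's sketch (not part of the kernel source): the TDCR decode above.
 * Bit 2 of the register is reserved, so bits {3,1,0} are squeezed into a
 * 3-bit value; 0b111 encodes divide-by-1, everything else 2^(value + 1).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t divide_count(uint32_t tdcr)
{
	uint32_t tmp1 = tdcr & 0xf;
	uint32_t tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;

	return 1u << (tmp2 & 0x7);
}

int main(void)
{
	printf("%u\n", divide_count(0x0));	/* 2 */
	printf("%u\n", divide_count(0xb));	/* 1 (0b1011 -> divide by 1) */
	printf("%u\n", divide_count(0xa));	/* 128 */
	return 0;
}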
1476
1477static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1478{
1479	/*
1480	 * Do not allow the guest to program periodic timers with small
1481	 * interval, since the hrtimers are not throttled by the host
1482	 * scheduler.
1483	 */
1484	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1485		s64 min_period = min_timer_period_us * 1000LL;
1486
1487		if (apic->lapic_timer.period < min_period) {
1488			pr_info_ratelimited(
1489			    "kvm: vcpu %i: requested %lld ns "
1490			    "lapic timer period limited to %lld ns\n",
1491			    apic->vcpu->vcpu_id,
1492			    apic->lapic_timer.period, min_period);
1493			apic->lapic_timer.period = min_period;
1494		}
1495	}
1496}
1497
1498static void cancel_hv_timer(struct kvm_lapic *apic);
1499
1500static void cancel_apic_timer(struct kvm_lapic *apic)
1501{
1502	hrtimer_cancel(&apic->lapic_timer.timer);
1503	preempt_disable();
1504	if (apic->lapic_timer.hv_timer_in_use)
1505		cancel_hv_timer(apic);
1506	preempt_enable();
1507}
1508
1509static void apic_update_lvtt(struct kvm_lapic *apic)
1510{
1511	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1512			apic->lapic_timer.timer_mode_mask;
1513
1514	if (apic->lapic_timer.timer_mode != timer_mode) {
1515		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1516				APIC_LVT_TIMER_TSCDEADLINE)) {
1517			cancel_apic_timer(apic);
1518			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1519			apic->lapic_timer.period = 0;
1520			apic->lapic_timer.tscdeadline = 0;
1521		}
1522		apic->lapic_timer.timer_mode = timer_mode;
1523		limit_periodic_timer_frequency(apic);
1524	}
1525}
1526
1527/*
1528 * On APICv, this test will cause a busy wait
1529 * during a higher-priority task.
1530 */
1531
1532static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1533{
1534	struct kvm_lapic *apic = vcpu->arch.apic;
1535	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1536
1537	if (kvm_apic_hw_enabled(apic)) {
1538		int vec = reg & APIC_VECTOR_MASK;
1539		void *bitmap = apic->regs + APIC_ISR;
1540
1541		if (vcpu->arch.apicv_active)
1542			bitmap = apic->regs + APIC_IRR;
1543
1544		if (apic_test_vector(vec, bitmap))
1545			return true;
1546	}
1547	return false;
1548}
1549
1550static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1551{
1552	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1553
1554	/*
1555	 * If the guest TSC is running at a different ratio than the host, then
1556	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
1557	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1558	 * always for VMX enabled hardware.
1559	 */
1560	if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
1561		__delay(min(guest_cycles,
1562			nsec_to_cycles(vcpu, timer_advance_ns)));
1563	} else {
1564		u64 delay_ns = guest_cycles * 1000000ULL;
1565		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1566		ndelay(min_t(u32, delay_ns, timer_advance_ns));
1567	}
1568}
1569
1570static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1571					      s64 advance_expire_delta)
1572{
1573	struct kvm_lapic *apic = vcpu->arch.apic;
1574	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1575	u64 ns;
1576
1577	/* Do not adjust for tiny fluctuations or large random spikes. */
1578	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1579	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1580		return;
1581
1582	/* too early */
1583	if (advance_expire_delta < 0) {
1584		ns = -advance_expire_delta * 1000000ULL;
1585		do_div(ns, vcpu->arch.virtual_tsc_khz);
1586		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1587	} else {
1588	/* too late */
1589		ns = advance_expire_delta * 1000000ULL;
1590		do_div(ns, vcpu->arch.virtual_tsc_khz);
1591		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1592	}
1593
1594	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1595		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1596	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1597}
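
/*
 * Editor's sketch (not part of the kernel source): one step of the adaptive
 * tuning above.  The measured early/late delta (in guest TSC cycles) is
 * converted to ns and only 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP of it is
 * applied, damping oscillation.  The numbers below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t timer_advance_ns = 1000;
	int64_t delta_cycles = -2400;	/* fired 2400 cycles early */
	uint64_t tsc_khz = 2400000;	/* 2.4 GHz guest TSC */

	uint64_t ns = (uint64_t)(-delta_cycles) * 1000000ULL / tsc_khz;
	timer_advance_ns -= ns / 8;	/* LAPIC_TIMER_ADVANCE_ADJUST_STEP */

	printf("advance: %u ns\n", timer_advance_ns);	/* 1000 - 125 = 875 */
	return 0;
}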
1598
1599static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1600{
1601	struct kvm_lapic *apic = vcpu->arch.apic;
1602	u64 guest_tsc, tsc_deadline;
1603
1604	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1605	apic->lapic_timer.expired_tscdeadline = 0;
1606	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1607	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
1608
 1609	if (lapic_timer_advance) {
1610		adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
1611		/*
1612		 * If the timer fired early, reread the TSC to account for the
1613		 * overhead of the above adjustment to avoid waiting longer
1614		 * than is necessary.
1615		 */
1616		if (guest_tsc < tsc_deadline)
1617			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1618	}
1619
1620	if (guest_tsc < tsc_deadline)
1621		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1622}
1623
1624void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1625{
1626	if (lapic_in_kernel(vcpu) &&
1627	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1628	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1629	    lapic_timer_int_injected(vcpu))
1630		__kvm_wait_lapic_expire(vcpu);
1631}
1632EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1633
1634static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1635{
1636	struct kvm_timer *ktimer = &apic->lapic_timer;
1637
1638	kvm_apic_local_deliver(apic, APIC_LVTT);
1639	if (apic_lvtt_tscdeadline(apic)) {
1640		ktimer->tscdeadline = 0;
1641	} else if (apic_lvtt_oneshot(apic)) {
1642		ktimer->tscdeadline = 0;
1643		ktimer->target_expiration = 0;
1644	}
1645}
1646
1647static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1648{
1649	struct kvm_vcpu *vcpu = apic->vcpu;
1650	struct kvm_timer *ktimer = &apic->lapic_timer;
1651
1652	if (atomic_read(&apic->lapic_timer.pending))
1653		return;
1654
1655	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1656		ktimer->expired_tscdeadline = ktimer->tscdeadline;
1657
1658	if (!from_timer_fn && vcpu->arch.apicv_active) {
1659		WARN_ON(kvm_get_running_vcpu() != vcpu);
1660		kvm_apic_inject_pending_timer_irqs(apic);
1661		return;
1662	}
1663
1664	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1665		/*
1666		 * Ensure the guest's timer has truly expired before posting an
1667		 * interrupt.  Open code the relevant checks to avoid querying
1668		 * lapic_timer_int_injected(), which will be false since the
1669		 * interrupt isn't yet injected.  Waiting until after injecting
1670		 * is not an option since that won't help a posted interrupt.
1671		 */
1672		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1673		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
1674			__kvm_wait_lapic_expire(vcpu);
1675		kvm_apic_inject_pending_timer_irqs(apic);
1676		return;
1677	}
1678
1679	atomic_inc(&apic->lapic_timer.pending);
1680	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1681	if (from_timer_fn)
1682		kvm_vcpu_kick(vcpu);
1683}
1684
1685static void start_sw_tscdeadline(struct kvm_lapic *apic)
1686{
1687	struct kvm_timer *ktimer = &apic->lapic_timer;
1688	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1689	u64 ns = 0;
1690	ktime_t expire;
1691	struct kvm_vcpu *vcpu = apic->vcpu;
1692	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1693	unsigned long flags;
1694	ktime_t now;
1695
1696	if (unlikely(!tscdeadline || !this_tsc_khz))
1697		return;
1698
1699	local_irq_save(flags);
1700
1701	now = ktime_get();
1702	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1703
1704	ns = (tscdeadline - guest_tsc) * 1000000ULL;
1705	do_div(ns, this_tsc_khz);
1706
1707	if (likely(tscdeadline > guest_tsc) &&
1708	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
1709		expire = ktime_add_ns(now, ns);
1710		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1711		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1712	} else
1713		apic_timer_expired(apic, false);
1714
1715	local_irq_restore(flags);
1716}
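
/*
 * Editor's sketch (not part of the kernel source): the cycles-to-ns
 * conversion used above to program the hrtimer, with the timer advance
 * subtracted so the host timer fires ahead of the guest deadline.  The
 * values below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t guest_tsc = 1000000, tscdeadline = 1480000;
	uint64_t tsc_khz = 2400000;	/* 2.4 GHz guest TSC */
	uint64_t advance_ns = 50;

	uint64_t ns = (tscdeadline - guest_tsc) * 1000000ULL / tsc_khz;

	printf("program hrtimer %llu ns out\n",
	       (unsigned long long)(ns - advance_ns));	/* 200000 - 50 */
	return 0;
}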
1717
1718static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1719{
1720	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1721}
1722
1723static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1724{
1725	ktime_t now, remaining;
1726	u64 ns_remaining_old, ns_remaining_new;
1727
1728	apic->lapic_timer.period =
1729			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1730	limit_periodic_timer_frequency(apic);
1731
1732	now = ktime_get();
1733	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1734	if (ktime_to_ns(remaining) < 0)
1735		remaining = 0;
1736
1737	ns_remaining_old = ktime_to_ns(remaining);
1738	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1739	                                   apic->divide_count, old_divisor);
1740
1741	apic->lapic_timer.tscdeadline +=
1742		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1743		nsec_to_cycles(apic->vcpu, ns_remaining_old);
1744	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1745}
1746
1747static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1748{
1749	ktime_t now;
1750	u64 tscl = rdtsc();
1751	s64 deadline;
1752
1753	now = ktime_get();
1754	apic->lapic_timer.period =
1755			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1756
1757	if (!apic->lapic_timer.period) {
1758		apic->lapic_timer.tscdeadline = 0;
1759		return false;
1760	}
1761
1762	limit_periodic_timer_frequency(apic);
1763	deadline = apic->lapic_timer.period;
1764
1765	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1766		if (unlikely(count_reg != APIC_TMICT)) {
1767			deadline = tmict_to_ns(apic,
1768				     kvm_lapic_get_reg(apic, count_reg));
1769			if (unlikely(deadline <= 0))
1770				deadline = apic->lapic_timer.period;
1771			else if (unlikely(deadline > apic->lapic_timer.period)) {
1772				pr_info_ratelimited(
1773				    "kvm: vcpu %i: requested lapic timer restore with "
1774				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1775				    "Using initial count to start timer.\n",
1776				    apic->vcpu->vcpu_id,
1777				    count_reg,
1778				    kvm_lapic_get_reg(apic, count_reg),
1779				    deadline, apic->lapic_timer.period);
1780				kvm_lapic_set_reg(apic, count_reg, 0);
1781				deadline = apic->lapic_timer.period;
1782			}
1783		}
1784	}
1785
1786	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1787		nsec_to_cycles(apic->vcpu, deadline);
1788	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1789
1790	return true;
1791}
1792
1793static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1794{
1795	ktime_t now = ktime_get();
1796	u64 tscl = rdtsc();
1797	ktime_t delta;
1798
1799	/*
1800	 * Synchronize both deadlines to the same time source or
1801	 * differences in the periods (caused by differences in the
1802	 * underlying clocks or numerical approximation errors) will
1803	 * cause the two to drift apart over time as the errors
1804	 * accumulate.
1805	 */
1806	apic->lapic_timer.target_expiration =
1807		ktime_add_ns(apic->lapic_timer.target_expiration,
1808				apic->lapic_timer.period);
1809	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1810	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1811		nsec_to_cycles(apic->vcpu, delta);
1812}
1813
1814static void start_sw_period(struct kvm_lapic *apic)
1815{
1816	if (!apic->lapic_timer.period)
1817		return;
1818
1819	if (ktime_after(ktime_get(),
1820			apic->lapic_timer.target_expiration)) {
1821		apic_timer_expired(apic, false);
1822
1823		if (apic_lvtt_oneshot(apic))
1824			return;
1825
1826		advance_periodic_target_expiration(apic);
1827	}
1828
1829	hrtimer_start(&apic->lapic_timer.timer,
1830		apic->lapic_timer.target_expiration,
1831		HRTIMER_MODE_ABS_HARD);
1832}
1833
1834bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1835{
1836	if (!lapic_in_kernel(vcpu))
1837		return false;
1838
1839	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1840}
1841EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1842
1843static void cancel_hv_timer(struct kvm_lapic *apic)
1844{
1845	WARN_ON(preemptible());
1846	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1847	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
1848	apic->lapic_timer.hv_timer_in_use = false;
1849}
1850
1851static bool start_hv_timer(struct kvm_lapic *apic)
1852{
1853	struct kvm_timer *ktimer = &apic->lapic_timer;
1854	struct kvm_vcpu *vcpu = apic->vcpu;
1855	bool expired;
1856
1857	WARN_ON(preemptible());
1858	if (!kvm_can_use_hv_timer(vcpu))
1859		return false;
1860
1861	if (!ktimer->tscdeadline)
1862		return false;
1863
1864	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
1865		return false;
1866
1867	ktimer->hv_timer_in_use = true;
1868	hrtimer_cancel(&ktimer->timer);
1869
1870	/*
1871	 * To simplify handling the periodic timer, leave the hv timer running
1872	 * even if the deadline timer has expired, i.e. rely on the resulting
1873	 * VM-Exit to recompute the periodic timer's target expiration.
1874	 */
1875	if (!apic_lvtt_period(apic)) {
1876		/*
1877		 * Cancel the hv timer if the sw timer fired while the hv timer
1878		 * was being programmed, or if the hv timer itself expired.
1879		 */
1880		if (atomic_read(&ktimer->pending)) {
1881			cancel_hv_timer(apic);
1882		} else if (expired) {
1883			apic_timer_expired(apic, false);
1884			cancel_hv_timer(apic);
1885		}
1886	}
1887
1888	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1889
1890	return true;
1891}
1892
1893static void start_sw_timer(struct kvm_lapic *apic)
1894{
1895	struct kvm_timer *ktimer = &apic->lapic_timer;
1896
1897	WARN_ON(preemptible());
1898	if (apic->lapic_timer.hv_timer_in_use)
1899		cancel_hv_timer(apic);
1900	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1901		return;
1902
1903	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1904		start_sw_period(apic);
1905	else if (apic_lvtt_tscdeadline(apic))
1906		start_sw_tscdeadline(apic);
1907	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1908}
1909
1910static void restart_apic_timer(struct kvm_lapic *apic)
1911{
1912	preempt_disable();
1913
1914	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1915		goto out;
1916
1917	if (!start_hv_timer(apic))
1918		start_sw_timer(apic);
1919out:
1920	preempt_enable();
1921}
1922
1923void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1924{
1925	struct kvm_lapic *apic = vcpu->arch.apic;
1926
1927	preempt_disable();
1928	/* If the preempt notifier has already run, it also called apic_timer_expired */
1929	if (!apic->lapic_timer.hv_timer_in_use)
1930		goto out;
1931	WARN_ON(rcuwait_active(&vcpu->wait));
1932	apic_timer_expired(apic, false);
1933	cancel_hv_timer(apic);
1934
1935	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1936		advance_periodic_target_expiration(apic);
1937		restart_apic_timer(apic);
1938	}
1939out:
1940	preempt_enable();
1941}
1942EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1943
1944void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1945{
1946	restart_apic_timer(vcpu->arch.apic);
1947}
1948EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1949
1950void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1951{
1952	struct kvm_lapic *apic = vcpu->arch.apic;
1953
1954	preempt_disable();
1955	/* Possibly the TSC deadline timer is not enabled yet */
1956	if (apic->lapic_timer.hv_timer_in_use)
1957		start_sw_timer(apic);
1958	preempt_enable();
1959}
1960EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1961
1962void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1963{
1964	struct kvm_lapic *apic = vcpu->arch.apic;
1965
1966	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1967	restart_apic_timer(apic);
1968}
1969
1970static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
1971{
1972	atomic_set(&apic->lapic_timer.pending, 0);
1973
1974	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1975	    && !set_target_expiration(apic, count_reg))
1976		return;
1977
1978	restart_apic_timer(apic);
1979}
1980
1981static void start_apic_timer(struct kvm_lapic *apic)
1982{
1983	__start_apic_timer(apic, APIC_TMICT);
1984}
1985
1986static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1987{
1988	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1989
1990	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1991		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1992		if (lvt0_in_nmi_mode) {
1993			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1994		} else
1995			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1996	}
1997}
1998
1999int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2000{
2001	int ret = 0;
2002
2003	trace_kvm_apic_write(reg, val);
2004
2005	switch (reg) {
2006	case APIC_ID:		/* Local APIC ID */
2007		if (!apic_x2apic_mode(apic))
2008			kvm_apic_set_xapic_id(apic, val >> 24);
2009		else
2010			ret = 1;
2011		break;
2012
2013	case APIC_TASKPRI:
2014		report_tpr_access(apic, true);
2015		apic_set_tpr(apic, val & 0xff);
2016		break;
2017
2018	case APIC_EOI:
2019		apic_set_eoi(apic);
2020		break;
2021
2022	case APIC_LDR:
2023		if (!apic_x2apic_mode(apic))
2024			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2025		else
2026			ret = 1;
2027		break;
2028
2029	case APIC_DFR:
2030		if (!apic_x2apic_mode(apic))
2031			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2032		else
2033			ret = 1;
2034		break;
2035
2036	case APIC_SPIV: {
2037		u32 mask = 0x3ff;
2038		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2039			mask |= APIC_SPIV_DIRECTED_EOI;
2040		apic_set_spiv(apic, val & mask);
2041		if (!(val & APIC_SPIV_APIC_ENABLED)) {
2042			int i;
2043			u32 lvt_val;
2044
2045			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
2046				lvt_val = kvm_lapic_get_reg(apic,
2047						       APIC_LVTT + 0x10 * i);
2048				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
2049					     lvt_val | APIC_LVT_MASKED);
2050			}
2051			apic_update_lvtt(apic);
2052			atomic_set(&apic->lapic_timer.pending, 0);
2053
2054		}
2055		break;
2056	}
2057	case APIC_ICR:
2058		/* No delay here, so we always clear the pending bit */
2059		val &= ~(1 << 12);
2060		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2061		kvm_lapic_set_reg(apic, APIC_ICR, val);
2062		break;
2063
2064	case APIC_ICR2:
2065		if (!apic_x2apic_mode(apic))
2066			val &= 0xff000000;
2067		kvm_lapic_set_reg(apic, APIC_ICR2, val);
2068		break;
2069
2070	case APIC_LVT0:
2071		apic_manage_nmi_watchdog(apic, val);
2072		fallthrough;
2073	case APIC_LVTTHMR:
2074	case APIC_LVTPC:
2075	case APIC_LVT1:
2076	case APIC_LVTERR: {
2077		/* TODO: Check vector */
2078		size_t size;
2079		u32 index;
2080
2081		if (!kvm_apic_sw_enabled(apic))
2082			val |= APIC_LVT_MASKED;
2083		size = ARRAY_SIZE(apic_lvt_mask);
2084		index = array_index_nospec(
2085				(reg - APIC_LVTT) >> 4, size);
2086		val &= apic_lvt_mask[index];
2087		kvm_lapic_set_reg(apic, reg, val);
2088		break;
2089	}
2090
2091	case APIC_LVTT:
2092		if (!kvm_apic_sw_enabled(apic))
2093			val |= APIC_LVT_MASKED;
2094		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2095		kvm_lapic_set_reg(apic, APIC_LVTT, val);
2096		apic_update_lvtt(apic);
2097		break;
2098
2099	case APIC_TMICT:
2100		if (apic_lvtt_tscdeadline(apic))
2101			break;
2102
2103		cancel_apic_timer(apic);
2104		kvm_lapic_set_reg(apic, APIC_TMICT, val);
2105		start_apic_timer(apic);
2106		break;
2107
2108	case APIC_TDCR: {
2109		uint32_t old_divisor = apic->divide_count;
2110
2111		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2112		update_divide_count(apic);
2113		if (apic->divide_count != old_divisor &&
2114				apic->lapic_timer.period) {
2115			hrtimer_cancel(&apic->lapic_timer.timer);
2116			update_target_expiration(apic, old_divisor);
2117			restart_apic_timer(apic);
2118		}
2119		break;
2120	}
2121	case APIC_ESR:
2122		if (apic_x2apic_mode(apic) && val != 0)
2123			ret = 1;
2124		break;
2125
2126	case APIC_SELF_IPI:
2127		if (apic_x2apic_mode(apic)) {
2128			kvm_lapic_reg_write(apic, APIC_ICR,
2129					    APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
2130		} else
2131			ret = 1;
2132		break;
2133	default:
2134		ret = 1;
2135		break;
2136	}
2137
2138	kvm_recalculate_apic_map(apic->vcpu->kvm);
2139
2140	return ret;
2141}
2142EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
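
/*
 * Editor's illustrative sketch (not part of the kernel source): the write
 * path above is what a guest's xAPIC IPI sequence ultimately lands in.  A
 * guest typically sends a fixed IPI by programming the destination into
 * ICR2 and then writing ICR, whose write triggers the send:
 *
 *	#define APIC_BASE	0xfee00000UL	// default xAPIC MMIO base
 *
 *	void send_fixed_ipi(u8 dest_apic_id, u8 vector)
 *	{
 *		volatile u32 *icr2 = (volatile u32 *)(APIC_BASE + 0x310);
 *		volatile u32 *icr  = (volatile u32 *)(APIC_BASE + 0x300);
 *
 *		*icr2 = (u32)dest_apic_id << 24;  // APIC_ICR2 case above
 *		*icr  = vector;                   // APIC_ICR case sends the IPI
 *	}
 *
 * KVM clears bit 12 (Delivery Status) unconditionally because emulated
 * delivery completes before the write returns.
 */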
2143
2144static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2145			    gpa_t address, int len, const void *data)
2146{
2147	struct kvm_lapic *apic = to_lapic(this);
2148	unsigned int offset = address - apic->base_address;
2149	u32 val;
2150
2151	if (!apic_mmio_in_range(apic, address))
2152		return -EOPNOTSUPP;
2153
2154	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2155		if (!kvm_check_has_quirk(vcpu->kvm,
2156					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2157			return -EOPNOTSUPP;
2158
2159		return 0;
2160	}
2161
2162	/*
2163	 * APIC registers must be aligned on a 128-bit boundary, and 32/64/128-bit
2164	 * registers must be accessed through aligned 32-bit reads and writes.
2165	 * See SDM 8.4.1.
2166	 */
2167	if (len != 4 || (offset & 0xf))
2168		return 0;
2169
2170	val = *(u32*)data;
2171
2172	kvm_lapic_reg_write(apic, offset & 0xff0, val);
2173
2174	return 0;
2175}
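
/*
 * Illustrative note (editor's addition, not kernel source): per the filter
 * above, only naturally aligned 32-bit MMIO accesses reach the register
 * emulation; everything else is silently dropped.  For example:
 *
 *	movl %eax, 0xfee000b0	// len == 4, offset 0xb0: handled (EOI)
 *	movb %al,  0xfee000b0	// len == 1: ignored, returns 0
 *	movl %eax, 0xfee000b4	// offset & 0xf != 0: ignored
 */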
2176
2177void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2178{
2179	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2180}
2181EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2182
2183/* emulate an APIC access in a trap manner, i.e. after the access has already completed */
2184void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2185{
2186	u32 val = 0;
2187
2188	/* hardware has already done the conditional check and instruction decode */
2189	offset &= 0xff0;
2190
2191	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
2192
2193	/* TODO: optimize to just emulate the side effects without one more write */
2194	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
2195}
2196EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
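
/*
 * Editor's note (not part of the source): with hardware APIC virtualization
 * (e.g. VMX APIC-write VM exits), the CPU has already completed the write to
 * the virtual-APIC page and only reports the page offset, so KVM re-reads the
 * value and replays it through kvm_lapic_reg_write() purely for the side
 * effects.  The masking mirrors the 16-byte register stride:
 *
 *	offset = 0x0b3 & 0xff0;		// -> 0x0b0, the EOI register
 */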
2197
2198void kvm_free_lapic(struct kvm_vcpu *vcpu)
2199{
2200	struct kvm_lapic *apic = vcpu->arch.apic;
2201
2202	if (!vcpu->arch.apic)
2203		return;
2204
2205	hrtimer_cancel(&apic->lapic_timer.timer);
2206
2207	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2208		static_branch_slow_dec_deferred(&apic_hw_disabled);
2209
2210	if (!apic->sw_enabled)
2211		static_branch_slow_dec_deferred(&apic_sw_disabled);
2212
2213	if (apic->regs)
2214		free_page((unsigned long)apic->regs);
2215
2216	kfree(apic);
2217}
2218
2219/*
2220 *----------------------------------------------------------------------
2221 * LAPIC interface
2222 *----------------------------------------------------------------------
2223 */
2224u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2225{
2226	struct kvm_lapic *apic = vcpu->arch.apic;
2227
2228	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2229		return 0;
2230
2231	return apic->lapic_timer.tscdeadline;
2232}
2233
2234void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2235{
2236	struct kvm_lapic *apic = vcpu->arch.apic;
2237
2238	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2239		return;
2240
2241	hrtimer_cancel(&apic->lapic_timer.timer);
2242	apic->lapic_timer.tscdeadline = data;
2243	start_apic_timer(apic);
2244}
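
/*
 * Illustrative guest-side sketch (editor's addition, not kernel source): the
 * MSR write emulated above is the second half of the usual TSC-deadline
 * programming sequence.  The MSR number is architectural; the vector 0xec is
 * just an example (Linux's LOCAL_TIMER_VECTOR):
 *
 *	#define MSR_IA32_TSC_DEADLINE	0x6e0
 *
 *	// LVTT: vector 0xec, timer mode = TSC-deadline (bits 18:17 = 10b)
 *	apic_write(APIC_LVTT, 0xec | (2 << 17));
 *	wrmsrl(MSR_IA32_TSC_DEADLINE, rdtsc() + cycles);
 *
 * Writing 0 to the MSR disarms the timer; KVM cancels the hrtimer and
 * otherwise re-arms it from start_apic_timer().
 */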
2245
2246void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2247{
2248	struct kvm_lapic *apic = vcpu->arch.apic;
2249
2250	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2251		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2252}
2253
2254u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2255{
2256	u64 tpr;
2257
2258	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2259
2260	return (tpr & 0xf0) >> 4;
2261}
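
/*
 * Worked example (editor's addition): CR8 and the TPR alias the same
 * priority class, with CR8 holding only bits 7:4 of the TPR.  Writing
 * CR8 = 0x3 via kvm_lapic_set_tpr() therefore yields TPR = 0x30 (bit 2
 * of the previous TPR is preserved), and reading CR8 back from
 * TPR = 0x3f gives (0x3f & 0xf0) >> 4 = 0x3.
 */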
2262
2263void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2264{
2265	u64 old_value = vcpu->arch.apic_base;
2266	struct kvm_lapic *apic = vcpu->arch.apic;
2267
2268	if (!apic)
2269		value |= MSR_IA32_APICBASE_BSP;
2270
2271	vcpu->arch.apic_base = value;
2272
2273	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2274		kvm_update_cpuid_runtime(vcpu);
2275
2276	if (!apic)
2277		return;
2278
2279	/* update jump label if enable bit changes */
2280	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2281		if (value & MSR_IA32_APICBASE_ENABLE) {
2282			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2283			static_branch_slow_dec_deferred(&apic_hw_disabled);
2284			/* Check if there are APF page ready requests pending */
2285			kvm_make_request(KVM_REQ_APF_READY, vcpu);
2286		} else {
2287			static_branch_inc(&apic_hw_disabled.key);
2288			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2289		}
2290	}
2291
2292	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2293		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2294
2295	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2296		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
2297
2298	apic->base_address = apic->vcpu->arch.apic_base &
2299			     MSR_IA32_APICBASE_BASE;
2300
2301	if ((value & MSR_IA32_APICBASE_ENABLE) &&
2302	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
2303		pr_warn_once("APIC base relocation is unsupported by KVM");
2304}
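
/*
 * Worked example (editor's addition): for the architectural layout of
 * MSR_IA32_APICBASE, the value 0xfee00c00 decodes as
 *
 *	bit  8  (0x100)  BSP flag            - clear
 *	bit 10  (0x400)  x2APIC enable       - set
 *	bit 11  (0x800)  xAPIC global enable - set
 *	bits 12+         base address        - 0xfee00000
 *
 * so the transition 0xfee00800 -> 0xfee00c00 flips only X2APIC_ENABLE and
 * takes the kvm_apic_set_x2apic_id() path above.
 */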
2305
2306void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2307{
2308	struct kvm_lapic *apic = vcpu->arch.apic;
2309
2310	if (vcpu->arch.apicv_active) {
2311		/* irr_pending is always true when apicv is activated. */
2312		apic->irr_pending = true;
2313		apic->isr_count = 1;
2314	} else {
2315		apic->irr_pending = (apic_search_irr(apic) != -1);
2316		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2317	}
2318}
2319EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
2320
2321void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2322{
2323	struct kvm_lapic *apic = vcpu->arch.apic;
2324	int i;
2325
2326	if (!apic)
2327		return;
2328
2329	/* Stop the timer in case it's a reset to an active apic */
2330	hrtimer_cancel(&apic->lapic_timer.timer);
2331
2332	if (!init_event) {
2333		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2334		                         MSR_IA32_APICBASE_ENABLE);
2335		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2336	}
2337	kvm_apic_set_version(apic->vcpu);
2338
2339	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2340		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2341	apic_update_lvtt(apic);
2342	if (kvm_vcpu_is_reset_bsp(vcpu) &&
2343	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2344		kvm_lapic_set_reg(apic, APIC_LVT0,
2345			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2346	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2347
2348	kvm_apic_set_dfr(apic, 0xffffffffU);
2349	apic_set_spiv(apic, 0xff);
2350	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2351	if (!apic_x2apic_mode(apic))
2352		kvm_apic_set_ldr(apic, 0);
2353	kvm_lapic_set_reg(apic, APIC_ESR, 0);
2354	kvm_lapic_set_reg(apic, APIC_ICR, 0);
2355	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2356	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2357	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2358	for (i = 0; i < 8; i++) {
2359		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2360		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2361		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2362	}
2363	kvm_apic_update_apicv(vcpu);
2364	apic->highest_isr_cache = -1;
2365	update_divide_count(apic);
2366	atomic_set(&apic->lapic_timer.pending, 0);
2367	if (kvm_vcpu_is_bsp(vcpu))
2368		kvm_lapic_set_base(vcpu,
2369				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2370	vcpu->arch.pv_eoi.msr_val = 0;
2371	apic_update_ppr(apic);
2372	if (vcpu->arch.apicv_active) {
2373		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2374		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
2375		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
2376	}
2377
2378	vcpu->arch.apic_arb_prio = 0;
2379	vcpu->arch.apic_attention = 0;
2380
2381	kvm_recalculate_apic_map(vcpu->kvm);
2382}
2383
2384/*
2385 *----------------------------------------------------------------------
2386 * timer interface
2387 *----------------------------------------------------------------------
2388 */
2389
2390static bool lapic_is_periodic(struct kvm_lapic *apic)
2391{
2392	return apic_lvtt_period(apic);
2393}
2394
2395int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2396{
2397	struct kvm_lapic *apic = vcpu->arch.apic;
2398
2399	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2400		return atomic_read(&apic->lapic_timer.pending);
2401
2402	return 0;
2403}
2404
2405int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2406{
2407	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2408	int vector, mode, trig_mode;
2409
2410	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2411		vector = reg & APIC_VECTOR_MASK;
2412		mode = reg & APIC_MODE_MASK;
2413		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2414		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2415					NULL);
2416	}
2417	return 0;
2418}
2419
2420void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2421{
2422	struct kvm_lapic *apic = vcpu->arch.apic;
2423
2424	if (apic)
2425		kvm_apic_local_deliver(apic, APIC_LVT0);
2426}
2427
2428static const struct kvm_io_device_ops apic_mmio_ops = {
2429	.read     = apic_mmio_read,
2430	.write    = apic_mmio_write,
2431};
2432
2433static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2434{
2435	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2436	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2437
2438	apic_timer_expired(apic, true);
2439
2440	if (lapic_is_periodic(apic)) {
2441		advance_periodic_target_expiration(apic);
2442		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2443		return HRTIMER_RESTART;
2444	} else
2445		return HRTIMER_NORESTART;
2446}
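
/*
 * Editor's note (not part of the source): apic_timer_fn() follows the
 * canonical self-rearming hrtimer pattern: push the expiry forward by one
 * period from within the callback and return HRTIMER_RESTART, e.g.
 *
 *	static enum hrtimer_restart tick(struct hrtimer *t)
 *	{
 *		hrtimer_add_expires_ns(t, period_ns);	// assumes a period_ns
 *		return HRTIMER_RESTART;
 *	}
 *
 * Advancing the target expiration first keeps the emulated counter and the
 * host timer in lockstep for periodic mode.
 */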
2447
2448int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2449{
2450	struct kvm_lapic *apic;
2451
2452	ASSERT(vcpu != NULL);
2453
2454	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2455	if (!apic)
2456		goto nomem;
2457
2458	vcpu->arch.apic = apic;
2459
2460	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2461	if (!apic->regs) {
2462		printk(KERN_ERR "Failed to allocate APIC register page for vcpu %x\n",
2463		       vcpu->vcpu_id);
2464		goto nomem_free_apic;
2465	}
2466	apic->vcpu = vcpu;
2467
2468	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2469		     HRTIMER_MODE_ABS_HARD);
2470	apic->lapic_timer.timer.function = apic_timer_fn;
2471	if (timer_advance_ns == -1) {
2472		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2473		lapic_timer_advance_dynamic = true;
2474	} else {
2475		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2476		lapic_timer_advance_dynamic = false;
2477	}
2478
2479	/*
2480	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
2481	 * thinking that APIC state has changed.
2482	 */
2483	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2484	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2485	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2486
2487	return 0;
2488nomem_free_apic:
2489	kfree(apic);
2490	vcpu->arch.apic = NULL;
2491nomem:
2492	return -ENOMEM;
2493}
2494
2495int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2496{
2497	struct kvm_lapic *apic = vcpu->arch.apic;
2498	u32 ppr;
2499
2500	if (!kvm_apic_present(vcpu))
2501		return -1;
2502
2503	__apic_update_ppr(apic, &ppr);
2504	return apic_has_interrupt_for_ppr(apic, ppr);
2505}
2506EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
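
/*
 * Worked example (editor's addition): the PPR consulted above is
 * max(TPR, ISRV & 0xf0), where ISRV is the highest in-service vector.
 * With TPR = 0x30 and an in-service vector 0x41, PPR = 0x40, so a
 * pending vector 0x4e (priority class 4) is held back while 0x51
 * (class 5) would be delivered.
 */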
2507
2508int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2509{
2510	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2511
2512	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2513		return 1;
2514	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2515	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2516		return 1;
2517	return 0;
2518}
2519
2520void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2521{
2522	struct kvm_lapic *apic = vcpu->arch.apic;
2523
2524	if (atomic_read(&apic->lapic_timer.pending) > 0) {
2525		kvm_apic_inject_pending_timer_irqs(apic);
2526		atomic_set(&apic->lapic_timer.pending, 0);
2527	}
2528}
2529
2530int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2531{
2532	int vector = kvm_apic_has_interrupt(vcpu);
2533	struct kvm_lapic *apic = vcpu->arch.apic;
2534	u32 ppr;
2535
2536	if (vector == -1)
2537		return -1;
2538
2539	/*
2540	 * We get here even with APIC virtualization enabled, if doing
2541	 * nested virtualization and L1 runs with the "acknowledge interrupt
2542	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
2543	 * because the processor would deliver it through the IDT.
2544	 */
2545
2546	apic_clear_irr(vector, apic);
2547	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
2548		/*
2549		 * For auto-EOI interrupts, there might be another pending
2550		 * interrupt above PPR, so check whether to raise another
2551		 * KVM_REQ_EVENT.
2552		 */
2553		apic_update_ppr(apic);
2554	} else {
2555		/*
2556		 * For normal interrupts, PPR has been raised and there cannot
2557		 * be a higher-priority pending interrupt---except if there was
2558		 * a concurrent interrupt injection, but that would have
2559		 * triggered KVM_REQ_EVENT already.
2560		 */
2561		apic_set_isr(vector, apic);
2562		__apic_update_ppr(apic, &ppr);
2563	}
2564
2565	return vector;
2566}
2567
2568static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2569		struct kvm_lapic_state *s, bool set)
2570{
2571	if (apic_x2apic_mode(vcpu->arch.apic)) {
2572		u32 *id = (u32 *)(s->regs + APIC_ID);
2573		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2574
2575		if (vcpu->kvm->arch.x2apic_format) {
2576			if (*id != vcpu->vcpu_id)
2577				return -EINVAL;
2578		} else {
2579			if (set)
2580				*id >>= 24;
2581			else
2582				*id <<= 24;
2583		}
2584
2585		/* In x2APIC mode, the LDR is fixed and based on the id */
2586		if (set)
2587			*ldr = kvm_apic_calc_x2apic_ldr(*id);
2588	}
2589
2590	return 0;
2591}
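
/*
 * Worked example (editor's addition): in x2APIC mode the LDR is derived
 * from the APIC ID as ((id >> 4) << 16) | (1 << (id & 0xf)), i.e. a
 * 16-CPU cluster number plus a one-hot logical ID within the cluster.
 * For id = 0x23: cluster 2, bit 3 -> LDR = 0x00020008.
 */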
2592
2593int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2594{
2595	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2596
2597	/*
2598	 * Get calculated timer current count for remaining timer period (if
2599	 * any) and store it in the returned register set.
2600	 */
2601	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
2602			    __apic_read(vcpu->arch.apic, APIC_TMCCT));
2603
2604	return kvm_apic_state_fixup(vcpu, s, false);
2605}
2606
2607int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2608{
2609	struct kvm_lapic *apic = vcpu->arch.apic;
2610	int r;
2611
2612	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2613	/* set SPIV separately to get count of SW disabled APICs right */
2614	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2615
2616	r = kvm_apic_state_fixup(vcpu, s, true);
2617	if (r) {
2618		kvm_recalculate_apic_map(vcpu->kvm);
2619		return r;
2620	}
2621	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2622
2623	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2624	kvm_recalculate_apic_map(vcpu->kvm);
2625	kvm_apic_set_version(vcpu);
2626
2627	apic_update_ppr(apic);
2628	hrtimer_cancel(&apic->lapic_timer.timer);
2629	apic->lapic_timer.expired_tscdeadline = 0;
2630	apic_update_lvtt(apic);
2631	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2632	update_divide_count(apic);
2633	__start_apic_timer(apic, APIC_TMCCT);
2634	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
2635	kvm_apic_update_apicv(vcpu);
2636	apic->highest_isr_cache = -1;
2637	if (vcpu->arch.apicv_active) {
2638		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2639		static_call(kvm_x86_hwapic_irr_update)(vcpu,
2640				apic_find_highest_irr(apic));
2641		static_call(kvm_x86_hwapic_isr_update)(vcpu,
2642				apic_find_highest_isr(apic));
2643	}
2644	kvm_make_request(KVM_REQ_EVENT, vcpu);
2645	if (ioapic_in_kernel(vcpu->kvm))
2646		kvm_rtc_eoi_tracking_restore_one(vcpu);
2647
2648	vcpu->arch.apic_arb_prio = 0;
2649
2650	return 0;
2651}
2652
2653void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2654{
2655	struct hrtimer *timer;
2656
2657	if (!lapic_in_kernel(vcpu) ||
2658		kvm_can_post_timer_interrupt(vcpu))
2659		return;
2660
2661	timer = &vcpu->arch.apic->lapic_timer.timer;
2662	if (hrtimer_cancel(timer))
2663		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
2664}
2665
2666/*
2667 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2668 *
2669 * Detect whether guest triggered PV EOI since the
2670 * last entry. If yes, set EOI on the guest's behalf.
2671 * Clear PV EOI in guest memory in any case.
2672 */
2673static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2674					struct kvm_lapic *apic)
2675{
2676	bool pending;
2677	int vector;
2678	/*
2679	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2680	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
2681	 *
2682	 * KVM_APIC_PV_EOI_PENDING is unset:
2683	 * 	-> host disabled PV EOI.
2684	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2685	 * 	-> host enabled PV EOI, guest did not execute EOI yet.
2686	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2687	 * 	-> host enabled PV EOI, guest executed EOI.
2688	 */
2689	BUG_ON(!pv_eoi_enabled(vcpu));
2690	pending = pv_eoi_get_pending(vcpu);
2691	/*
2692	 * Clear pending bit in any case: it will be set again on vmentry.
2693	 * While this might not be ideal from a performance point of view,
2694	 * this makes sure pv eoi is only enabled when we know it's safe.
2695	 */
2696	pv_eoi_clr_pending(vcpu);
2697	if (pending)
2698		return;
2699	vector = apic_set_eoi(apic);
2700	trace_kvm_pv_eoi(apic, vector);
2701}
2702
2703void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2704{
2705	u32 data;
2706
2707	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2708		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2709
2710	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2711		return;
2712
2713	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2714				  sizeof(u32)))
2715		return;
2716
2717	apic_set_tpr(vcpu->arch.apic, data & 0xff);
2718}
2719
2720/*
2721 * apic_sync_pv_eoi_to_guest - called before vmentry
2722 *
2723 * Detect whether it's safe to enable PV EOI and
2724 * if yes do so.
2725 */
2726static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2727					struct kvm_lapic *apic)
2728{
2729	if (!pv_eoi_enabled(vcpu) ||
2730	    /* IRR set or many bits in ISR: could be nested. */
2731	    apic->irr_pending ||
2732	    /* Cache not set: could be safe but we don't bother. */
2733	    apic->highest_isr_cache == -1 ||
2734	    /* Need EOI to update ioapic. */
2735	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2736		/*
2737		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2738		 * so we need not do anything here.
2739		 */
2740		return;
2741	}
2742
2743	pv_eoi_set_pending(apic->vcpu);
2744}
2745
2746void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2747{
2748	u32 data, tpr;
2749	int max_irr, max_isr;
2750	struct kvm_lapic *apic = vcpu->arch.apic;
2751
2752	apic_sync_pv_eoi_to_guest(vcpu, apic);
2753
2754	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2755		return;
2756
2757	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2758	max_irr = apic_find_highest_irr(apic);
2759	if (max_irr < 0)
2760		max_irr = 0;
2761	max_isr = apic_find_highest_isr(apic);
2762	if (max_isr < 0)
2763		max_isr = 0;
2764	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2765
2766	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2767				sizeof(u32));
2768}
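
/*
 * Worked example (editor's addition): the 32-bit word mirrored to the
 * vAPIC page packs, from low byte to high: the TPR, the priority class of
 * the highest in-service vector (max_isr & 0xf0), a zero byte, and the
 * highest pending IRR vector.  E.g. tpr = 0x20, max_isr = 0x31 and
 * max_irr = 0x41 yield data = 0x41003020.
 */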
2769
2770int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2771{
2772	if (vapic_addr) {
2773		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2774					&vcpu->arch.apic->vapic_cache,
2775					vapic_addr, sizeof(u32)))
2776			return -EINVAL;
2777		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2778	} else {
2779		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2780	}
2781
2782	vcpu->arch.apic->vapic_addr = vapic_addr;
2783	return 0;
2784}
2785
2786int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2787{
2788	struct kvm_lapic *apic = vcpu->arch.apic;
2789	u32 reg = (msr - APIC_BASE_MSR) << 4;
2790
2791	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2792		return 1;
2793
2794	if (reg == APIC_ICR2)
2795		return 1;
2796
2797	/* For ICR writes, write the upper dword (destination) to ICR2 before the lower dword triggers the send */
2798	if (reg == APIC_ICR)
2799		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2800	return kvm_lapic_reg_write(apic, reg, (u32)data);
2801}
2802
2803int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2804{
2805	struct kvm_lapic *apic = vcpu->arch.apic;
2806	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2807
2808	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2809		return 1;
2810
2811	if (reg == APIC_DFR || reg == APIC_ICR2)
2812		return 1;
2813
2814	if (kvm_lapic_reg_read(apic, reg, 4, &low))
2815		return 1;
2816	if (reg == APIC_ICR)
2817		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2818
2819	*data = (((u64)high) << 32) | low;
2820
2821	return 0;
2822}
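
/*
 * Worked example (editor's addition): x2APIC MSRs start at APIC_BASE_MSR
 * (0x800) and map to xAPIC register offsets via reg = (msr - 0x800) << 4.
 * So MSR 0x808 -> 0x80 (APIC_TASKPRI), and MSR 0x830 -> 0x300 (APIC_ICR),
 * the one register read and written as a full 64 bits here by pairing it
 * with APIC_ICR2.
 */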
2823
2824int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2825{
2826	struct kvm_lapic *apic = vcpu->arch.apic;
2827
2828	if (!lapic_in_kernel(vcpu))
2829		return 1;
2830
2831	/* For ICR writes, write the upper dword (destination) to ICR2 before the lower dword triggers the send */
2832	if (reg == APIC_ICR)
2833		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2834	return kvm_lapic_reg_write(apic, reg, (u32)data);
2835}
2836
2837int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2838{
2839	struct kvm_lapic *apic = vcpu->arch.apic;
2840	u32 low, high = 0;
2841
2842	if (!lapic_in_kernel(vcpu))
2843		return 1;
2844
2845	if (kvm_lapic_reg_read(apic, reg, 4, &low))
2846		return 1;
2847	if (reg == APIC_ICR)
2848		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2849
2850	*data = (((u64)high) << 32) | low;
2851
2852	return 0;
2853}
2854
2855int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
2856{
2857	u64 addr = data & ~KVM_MSR_ENABLED;
2858	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
2859	unsigned long new_len;
2860
2861	if (!IS_ALIGNED(addr, 4))
2862		return 1;
2863
2864	vcpu->arch.pv_eoi.msr_val = data;
2865	if (!pv_eoi_enabled(vcpu))
2866		return 0;
2867
2868	if (addr == ghc->gpa && len <= ghc->len)
2869		new_len = ghc->len;
2870	else
2871		new_len = len;
2872
2873	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
2874}
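
/*
 * Illustrative guest-side sketch (editor's addition, not kernel source):
 * a guest opts in to PV EOI by publishing a 4-byte-aligned flag word and
 * setting the enable bit in the MSR, e.g.
 *
 *	#define MSR_KVM_PV_EOI_EN	0x4b564d04
 *	static DEFINE_PER_CPU(unsigned long, kvm_pv_eoi);	// assumed name
 *
 *	wrmsrl(MSR_KVM_PV_EOI_EN,
 *	       __pa(this_cpu_ptr(&kvm_pv_eoi)) | KVM_MSR_ENABLED);
 *
 * Writing the MSR with bit 0 (KVM_MSR_ENABLED) clear disables PV EOI again.
 */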
2875
2876int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2877{
2878	struct kvm_lapic *apic = vcpu->arch.apic;
2879	u8 sipi_vector;
2880	int r;
2881	unsigned long pe;
2882
2883	if (!lapic_in_kernel(vcpu))
2884		return 0;
2885
2886	/*
2887	 * Read pending events before calling the check_events
2888	 * callback.
2889	 */
2890	pe = smp_load_acquire(&apic->pending_events);
2891	if (!pe)
2892		return 0;
2893
2894	if (is_guest_mode(vcpu)) {
2895		r = kvm_check_nested_events(vcpu);
2896		if (r < 0)
2897			return r == -EBUSY ? 0 : r;
2898		/*
2899		 * If an event has happened and caused a vmexit,
2900		 * we know INITs are latched and therefore
2901		 * we will not incorrectly deliver an APIC
2902		 * event instead of a vmexit.
2903		 */
2904	}
2905
2906	/*
2907	 * INITs are latched while CPU is in specific states
2908	 * (SMM, VMX root mode, SVM with GIF=0).
2909	 * Because a CPU cannot be in these states immediately
2910	 * after it has processed an INIT signal (and thus in
2911	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
2912	 * and leave the INIT pending.
2913	 */
2914	if (kvm_vcpu_latch_init(vcpu)) {
2915		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2916		if (test_bit(KVM_APIC_SIPI, &pe))
2917			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2918		return 0;
2919	}
2920
2921	if (test_bit(KVM_APIC_INIT, &pe)) {
2922		clear_bit(KVM_APIC_INIT, &apic->pending_events);
2923		kvm_vcpu_reset(vcpu, true);
2924		if (kvm_vcpu_is_bsp(apic->vcpu))
2925			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2926		else
2927			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2928	}
2929	if (test_bit(KVM_APIC_SIPI, &pe)) {
2930		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2931		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2932			/* evaluate pending_events before reading the vector */
2933			smp_rmb();
2934			sipi_vector = apic->sipi_vector;
2935			kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2936			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2937		}
2938	}
2939	return 0;
2940}
2941
2942void kvm_lapic_exit(void)
2943{
2944	static_key_deferred_flush(&apic_hw_disabled);
2945	static_key_deferred_flush(&apic_sw_disabled);
2946}