// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
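
/*
 * Illustrative note (not in the original source): on 32-bit builds the C
 * '%' operator is unavailable for u64 operands, so mod_64() falls back to
 * x - y * (x / y) using div64_u64(). For example, with x = 10^10 and
 * y = 3 * 10^9, div64_u64() yields 3, so mod_64() returns
 * 10^10 - 3 * (3 * 10^9) = 10^9, matching 64-bit '%' semantics.
 */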

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			0x14UL
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following define is not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32
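
/*
 * Illustrative note (not in the original source): the 256 vectors are
 * tracked in 256 / APIC_VECTORS_PER_REG == 8 32-bit registers, spaced
 * 0x10 apart in the register page. E.g. vector 0x61 (97) lives in the
 * fourth register (97 / 32 == 3, byte offset 3 * 0x10) at bit position
 * 97 % 32 == 1, which is what the VEC_POS()/REG_POS() helpers used by
 * apic_test_vector() below compute.
 */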

/*
 * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
 * tuning. When enabled, KVM programs the host timer event to fire early, i.e.
 * before the deadline expires, to account for the delay between taking the
 * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
 * the guest, i.e. so that the interrupt arrives in the guest with minimal
 * latency relative to the deadline programmed by the guest.
 */
static bool lapic_timer_advance __read_mostly = true;
module_param(lapic_timer_advance, bool, 0444);

#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
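
/*
 * Illustrative example (not in the original source): if a timer interrupt
 * is observed to arrive 800 guest TSC cycles late on a 1 GHz guest TSC,
 * adjust_lapic_timer_advance() below converts that to
 * 800 * 1000000 / 1000000 == 800 ns and nudges timer_advance_ns up by only
 * 800 / LAPIC_TIMER_ADVANCE_ADJUST_STEP == 100 ns, so a single noisy
 * sample moves the advance window by at most 1/8 of the observed error.
 */
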
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
	*((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	__kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
	return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{
	__kvm_lapic_set_reg64(apic->regs, reg, val);
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}
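
/*
 * Illustrative example (not in the original source): the x2APIC LDR packs
 * a 16-bit cluster ID and a 16-bit logical ID. For x2APIC ID 0x25, the
 * calculation above yields cluster 0x25 >> 4 == 2 in the high half and
 * bit 0x25 & 0xf == 5 in the low half, i.e. LDR == 0x00020020.
 */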

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
	switch (map->logical_mode) {
	case KVM_APIC_MODE_SW_DISABLED:
		/* Arbitrarily use the flat map so that @cluster isn't NULL. */
		*cluster = map->xapic_flat_map;
		*mask = 0;
		return true;
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	case KVM_APIC_MODE_MAP_DISABLED:
		return false;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
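
/*
 * Illustrative example (not in the original source): for an x2APIC logical
 * destination of 0x0002000c, the decode above takes cluster
 * 0x0002000c >> 16 == 2, so @cluster points at phys_map[2 * 16] (the 16
 * APICs whose IDs share cluster 2), and @mask == 0x000c selects the APICs
 * with logical bits 2 and 3 within that cluster, i.e. x2APIC IDs 0x22 and
 * 0x23.
 */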

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
				    struct kvm_vcpu *vcpu,
				    bool *xapic_id_mismatch)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 x2apic_id = kvm_x2apic_id(apic);
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;

	/*
	 * For simplicity, KVM always allocates enough space for all possible
	 * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
	 * without the optimized map.
	 */
	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
		return -EINVAL;

	/*
	 * Bail if a vCPU was added and/or enabled its APIC between allocating
	 * the map and doing the actual calculations for the map. Note, KVM
	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
	 * the compiler decides to reload x2apic_id after this check.
	 */
	if (x2apic_id > new->max_apic_id)
		return -E2BIG;

	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
	 * 32-bit value. Any unwanted aliasing due to truncation results will
	 * be detected below.
	 */
	if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
		*xapic_id_mismatch = true;

	/*
	 * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
	 * Allow sending events to vCPUs by their x2APIC ID even if the target
	 * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
	 * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
	 * and collide).
	 *
	 * Honor the architectural (and KVM's non-optimized) behavior if
	 * userspace has not enabled 32-bit x2APIC IDs. Each APIC is supposed
	 * to process messages independently. If multiple vCPUs have the same
	 * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
	 * manually modified its xAPIC IDs, events targeting that ID are
	 * supposed to be recognized by all vCPUs with said ID.
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;

		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;
	} else {
		/*
		 * Disable the optimized map if the physical APIC ID is already
		 * mapped, i.e. is aliased to multiple vCPUs. The optimized
		 * map requires a strict 1:1 mapping between IDs and vCPUs.
		 */
		if (apic_x2apic_mode(apic))
			physical_id = x2apic_id;
		else
			physical_id = xapic_id;

		if (new->phys_map[physical_id])
			return -EINVAL;

		new->phys_map[physical_id] = apic;
	}

	return 0;
}

static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
					struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	enum kvm_apic_logical_mode logical_mode;
	struct kvm_lapic **cluster;
	u16 mask;
	u32 ldr;

	if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		return;

	if (!kvm_apic_sw_enabled(apic))
		return;

	ldr = kvm_lapic_get_reg(apic, APIC_LDR);
	if (!ldr)
		return;

	if (apic_x2apic_mode(apic)) {
		logical_mode = KVM_APIC_MODE_X2APIC;
	} else {
		ldr = GET_APIC_LOGICAL_ID(ldr);
		if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
			logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
		else
			logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
	}

	/*
	 * To optimize logical mode delivery, all software-enabled APICs must
	 * be configured for the same mode.
	 */
	if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
		new->logical_mode = logical_mode;
	} else if (new->logical_mode != logical_mode) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	/*
	 * In x2APIC mode, the LDR is read-only and derived directly from the
	 * x2APIC ID, thus is guaranteed to be addressable. KVM reuses
	 * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
	 * reversing the LDR calculation to get cluster of APICs, i.e. no
	 * additional work is required.
	 */
	if (apic_x2apic_mode(apic))
		return;

	if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
							&cluster, &mask))) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	if (!mask)
		return;

	ldr = ffs(mask) - 1;
	if (!is_power_of_2(mask) || cluster[ldr])
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
	else
		cluster[ldr] = apic;
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};

static void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 max_id = 255; /* enough space for any xAPIC ID */
	bool xapic_id_mismatch;
	int r;

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);

retry:
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
	 * or the APIC registers (if dirty). Note, on retry the map may have
	 * not yet been marked dirty by whatever task changed a vCPU's x2APIC
	 * ID, i.e. the map may still show up as in-progress. In that case
	 * this task still needs to retry and complete its calculation.
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	/*
	 * Reset the mismatch flag between attempts so that KVM does the right
	 * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
	 * keep max_id strictly increasing. Disallowing max_id from shrinking
	 * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
	 * with the highest x2APIC ID is toggling its APIC on and off.
	 */
	xapic_id_mismatch = false;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
			   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
			   GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;
	new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
		if (r) {
			kvfree(new);
			new = NULL;
			if (r == -E2BIG) {
				cond_resched();
				goto retry;
			}

			goto out;
		}

		kvm_recalculate_logical_map(new, vcpu);
	}
out:
	/*
	 * The optimized map is effectively KVM's internal version of APICv,
	 * and all unwanted aliasing that results in disabling the optimized
	 * map also applies to APICv.
	 */
	if (!new)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

	if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

	if (xapic_id_mismatch)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled) {
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
		kvm_xen_sw_enable_lapic(apic->vcpu);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
	return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = 0;

	if (!lapic_in_kernel(vcpu))
		return;

	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

	/*
	 * KVM's in-kernel IOAPIC emulates the 82093AA datasheet, which lacks
	 * an EOI register, so some buggy OSes (e.g. Windows with the Hyper-V
	 * role) disable EOI broadcast in the LAPIC without checking the
	 * IOAPIC version first, and level-triggered interrupts then never
	 * get EOIed in the IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
		return;

	/* Initialize/mask any "new" LVT entries. */
	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

	apic->nr_lvt_entries = nr_lvt_entries;

	/* The number of LVT entries is reflected in the version register. */
	kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,	/* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

		irr_val = *p_irr;
		pir_val = READ_ONCE(pir[i]);

		if (pir_val) {
			pir_val = xchg(&pir[i], 0);

			prev_irr_val = irr_val;
			do {
				irr_val = prev_irr_val | pir_val;
			} while (prev_irr_val != irr_val &&
				 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

			if (prev_irr_val != irr_val)
				max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
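
/*
 * Illustrative example (not in the original source): if hardware posted
 * vector 0x31 while the vCPU was running, pir[1] has bit 17 set
 * (0x31 == 1 * 32 + 17). __kvm_apic_update_irr() atomically claims that
 * bit with xchg(), ORs it into the IRR register at APIC_IRR + 0x10, and
 * reports max_irr == 0x31; the "true" return tells the caller the highest
 * pending vector is one that was just moved from the PIR.
 */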

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);

	if (unlikely(!apic->apicv_active && irr_updated))
		apic->irr_pending = true;
	return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	if (unlikely(apic->apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_x86_call(hwapic_irr_update)(apic->vcpu,
						apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood. Instead
	 * just set SVI.
	 */
	if (unlikely(apic->apicv_active))
		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an interrupt.
		 * The highest vector is injected. Thus the latest bit set matches
		 * the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment. In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(apic->apicv_active))
		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
		return;

	kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
}
EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/*
	 * This may race with setting of irr in __apic_accept_irq() and the
	 * value returned may be wrong, but kvm_vcpu_kick() in
	 * __apic_accept_irq will cause a vmexit immediately and the value
	 * will be recalculated on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
				 irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
		return;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		return false;

	val &= KVM_PV_EOI_ENABLED;

	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
		return false;

	/*
	 * Clear the pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from a performance point of view,
	 * it makes sure PV EOI is only enabled when we know it's safe.
	 */
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;
	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}
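
/*
 * Illustrative example (not in the original source): with TPR == 0x30 and
 * the highest in-service vector == 0x51, the priority classes are 0x30
 * and 0x50; since 0x30 < 0x50 the PPR becomes 0x50, so only pending
 * vectors with a class above 0x50 (i.e. >= 0x60) can be delivered per
 * apic_has_interrupt_for_ppr().
 */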

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	/*
	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
	 * were in x2APIC mode if the target APIC ID can't be encoded as an
	 * xAPIC ID. This allows unique addressing of hotplugged vCPUs (which
	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
	 * mode. Match the x2APIC ID if and only if the target APIC ID can't
	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
	 * changed its (addressable) xAPIC ID (which is writable).
	 */
	if (apic_x2apic_mode(apic) || mda > 0xff)
		return mda == kvm_x2apic_id(apic);

	return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}
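
/*
 * Illustrative example (not in the original source): in xAPIC flat mode an
 * LDR of 0x04 (bit 2) matches any MDA with bit 2 set, e.g. MDA 0x0c
 * targets the APICs owning bits 2 and 3 simultaneously. In cluster mode
 * the same MDA 0x0c only matches LDRs whose upper nibble (cluster) is 0
 * and whose lower nibble intersects 0xc.
 */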

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
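
/*
 * Illustrative example (not in the original source): vector hashing
 * spreads lowest-priority interrupts deterministically. With vector 0x61
 * (97) and 4 candidate destinations, mod == 97 % 4 == 1, so the loop
 * above walks to the second set bit in @bitmap and that vCPU receives
 * every instance of vector 0x61.
 */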

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		pr_info("Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
		     map->logical_mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
				     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped. In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
			struct kvm_lapic **src, struct kvm_lapic_irq *irq,
			struct kvm_apic_map *map, struct kvm_lapic ***dst,
			unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {
			*r = 0;
			return true;
		}
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
	    hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
						trig_mode, vector);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		if (!kvm_inject_smi(vcpu)) {
			kvm_vcpu_kick(vcpu);
			result = 1;
		}
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
 * out the destination vcpus array and set the bitmap or it traverses to
 * each available vcpu to identify the same.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;
	int vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every write to EOI will have a corresponding bit set in the
	 * ISR; one example is when the kernel checks the timer during
	 * setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (kvm_hv_synic_has_vector(apic->vcpu, vector))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * this interface assumes a trap-like exit, which has already finished
 * desired side effect including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
	struct kvm_lapic_irq irq;

	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	return div64_u64(ns, (apic->vcpu->kvm->arch.apic_bus_cycle_ns *
			      apic->divide_count));
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
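
/*
 * Illustrative example (not in the original source): each APIC register
 * occupies a 16-byte slot, so (reg >> 4) is a unique slot index that fits
 * in a 64-bit mask (0x3f0 >> 4 == 63 is the last slot). E.g.
 * APIC_REG_MASK(APIC_ID) with APIC_ID == 0x20 is bit 2, and
 * APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) sets 8 consecutive bits, one per
 * 32-vector ISR register.
 */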

u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
{
	/* Leave bits '0' for reserved and write-only registers. */
	u64 valid_reg_mask =
		APIC_REG_MASK(APIC_ID) |
		APIC_REG_MASK(APIC_LVR) |
		APIC_REG_MASK(APIC_TASKPRI) |
		APIC_REG_MASK(APIC_PROCPRI) |
		APIC_REG_MASK(APIC_LDR) |
		APIC_REG_MASK(APIC_SPIV) |
		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
		APIC_REG_MASK(APIC_ESR) |
		APIC_REG_MASK(APIC_ICR) |
		APIC_REG_MASK(APIC_LVTT) |
		APIC_REG_MASK(APIC_LVTTHMR) |
		APIC_REG_MASK(APIC_LVTPC) |
		APIC_REG_MASK(APIC_LVT0) |
		APIC_REG_MASK(APIC_LVT1) |
		APIC_REG_MASK(APIC_LVTERR) |
		APIC_REG_MASK(APIC_TMICT) |
		APIC_REG_MASK(APIC_TMCCT) |
		APIC_REG_MASK(APIC_TDCR);

	if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
		valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);

	/* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
	if (!apic_x2apic_mode(apic))
		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
				  APIC_REG_MASK(APIC_DFR) |
				  APIC_REG_MASK(APIC_ICR2);

	return valid_reg_mask;
}
EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);

static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;

	/*
	 * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
	 * x2APIC and needs to be manually handled by the caller.
	 */
	WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);

	if (alignment + len > 4)
		return 1;

	if (offset > 0x3f0 ||
	    !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
		return 1;

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			  gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);
}
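
/*
 * Illustrative example (not in the original source): TDCR's divide value
 * is encoded in bits 0, 1 and 3. For TDCR == 0xb (0b1011), tmp1 == 0xb,
 * tmp2 == ((0x3) | (0x8 >> 1)) + 1 == 8, and
 * divide_count == 1 << (8 & 7) == 1, i.e. divide-by-1. For TDCR == 0x0,
 * tmp2 == 1 and divide_count == 2, matching the SDM's divide-by-2
 * encoding.
 */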
1743
1744static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1745{
1746 /*
1747 * Do not allow the guest to program periodic timers with small
1748 * interval, since the hrtimers are not throttled by the host
1749 * scheduler.
1750 */
1751 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1752 s64 min_period = min_timer_period_us * 1000LL;
1753
1754 if (apic->lapic_timer.period < min_period) {
1755 pr_info_once(
1756 "vcpu %i: requested %lld ns "
1757 "lapic timer period limited to %lld ns\n",
1758 apic->vcpu->vcpu_id,
1759 apic->lapic_timer.period, min_period);
1760 apic->lapic_timer.period = min_period;
1761 }
1762 }
1763}
1764
1765static void cancel_hv_timer(struct kvm_lapic *apic);
1766
1767static void cancel_apic_timer(struct kvm_lapic *apic)
1768{
1769 hrtimer_cancel(&apic->lapic_timer.timer);
1770 preempt_disable();
1771 if (apic->lapic_timer.hv_timer_in_use)
1772 cancel_hv_timer(apic);
1773 preempt_enable();
1774 atomic_set(&apic->lapic_timer.pending, 0);
1775}
1776
1777static void apic_update_lvtt(struct kvm_lapic *apic)
1778{
1779 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1780 apic->lapic_timer.timer_mode_mask;
1781
1782 if (apic->lapic_timer.timer_mode != timer_mode) {
1783 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1784 APIC_LVT_TIMER_TSCDEADLINE)) {
1785 cancel_apic_timer(apic);
1786 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1787 apic->lapic_timer.period = 0;
1788 apic->lapic_timer.tscdeadline = 0;
1789 }
1790 apic->lapic_timer.timer_mode = timer_mode;
1791 limit_periodic_timer_frequency(apic);
1792 }
1793}
1794
1795/*
1796 * On APICv, this test will cause a busy wait
1797 * during a higher-priority task.
1798 */
1799
1800static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1801{
1802 struct kvm_lapic *apic = vcpu->arch.apic;
1803 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1804
1805 if (kvm_apic_hw_enabled(apic)) {
1806 int vec = reg & APIC_VECTOR_MASK;
1807 void *bitmap = apic->regs + APIC_ISR;
1808
1809 if (apic->apicv_active)
1810 bitmap = apic->regs + APIC_IRR;
1811
1812 if (apic_test_vector(vec, bitmap))
1813 return true;
1814 }
1815 return false;
1816}
1817
1818static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1819{
1820 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1821
1822 /*
1823 * If the guest TSC is running at a different ratio than the host, then
1824 * convert the delay to nanoseconds to achieve an accurate delay. Note
1825 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1826 * always for VMX enabled hardware.
1827 */
1828 if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1829 __delay(min(guest_cycles,
1830 nsec_to_cycles(vcpu, timer_advance_ns)));
1831 } else {
1832 u64 delay_ns = guest_cycles * 1000000ULL;
1833 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1834 ndelay(min_t(u32, delay_ns, timer_advance_ns));
1835 }
1836}
1837
1838static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1839 s64 advance_expire_delta)
1840{
1841 struct kvm_lapic *apic = vcpu->arch.apic;
1842 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1843 u64 ns;
1844
1845 /* Do not adjust for tiny fluctuations or large random spikes. */
1846 if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1847 abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1848 return;
1849
1850 /* too early */
1851 if (advance_expire_delta < 0) {
1852 ns = -advance_expire_delta * 1000000ULL;
1853 do_div(ns, vcpu->arch.virtual_tsc_khz);
1854 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1855 } else {
1856 /* too late */
1857 ns = advance_expire_delta * 1000000ULL;
1858 do_div(ns, vcpu->arch.virtual_tsc_khz);
1859 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1860 }
1861
1862 if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1863 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1864 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1865}
1866
1867static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1868{
1869 struct kvm_lapic *apic = vcpu->arch.apic;
1870 u64 guest_tsc, tsc_deadline;
1871
1872 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1873 apic->lapic_timer.expired_tscdeadline = 0;
1874 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1875 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1876
1877 adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1878
1879 /*
1880 * If the timer fired early, reread the TSC to account for the overhead
1881 * of the above adjustment to avoid waiting longer than is necessary.
1882 */
1883 if (guest_tsc < tsc_deadline)
1884 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1885
1886 if (guest_tsc < tsc_deadline)
1887 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1888}
1889
1890void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1891{
1892 if (lapic_in_kernel(vcpu) &&
1893 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1894 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1895 lapic_timer_int_injected(vcpu))
1896 __kvm_wait_lapic_expire(vcpu);
1897}
1898EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1899
1900static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1901{
1902 struct kvm_timer *ktimer = &apic->lapic_timer;
1903
1904 kvm_apic_local_deliver(apic, APIC_LVTT);
1905 if (apic_lvtt_tscdeadline(apic)) {
1906 ktimer->tscdeadline = 0;
1907 } else if (apic_lvtt_oneshot(apic)) {
1908 ktimer->tscdeadline = 0;
1909 ktimer->target_expiration = 0;
1910 }
1911}
1912
1913static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1914{
1915 struct kvm_vcpu *vcpu = apic->vcpu;
1916 struct kvm_timer *ktimer = &apic->lapic_timer;
1917
1918 if (atomic_read(&apic->lapic_timer.pending))
1919 return;
1920
1921 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1922 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1923
1924 if (!from_timer_fn && apic->apicv_active) {
1925 WARN_ON(kvm_get_running_vcpu() != vcpu);
1926 kvm_apic_inject_pending_timer_irqs(apic);
1927 return;
1928 }
1929
1930 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1931 /*
1932 * Ensure the guest's timer has truly expired before posting an
1933 * interrupt. Open code the relevant checks to avoid querying
1934 * lapic_timer_int_injected(), which will be false since the
1935 * interrupt isn't yet injected. Waiting until after injecting
1936 * is not an option since that won't help a posted interrupt.
1937 */
1938 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1939 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1940 __kvm_wait_lapic_expire(vcpu);
1941 kvm_apic_inject_pending_timer_irqs(apic);
1942 return;
1943 }
1944
1945 atomic_inc(&apic->lapic_timer.pending);
1946 kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1947 if (from_timer_fn)
1948 kvm_vcpu_kick(vcpu);
1949}
1950
1951static void start_sw_tscdeadline(struct kvm_lapic *apic)
1952{
1953 struct kvm_timer *ktimer = &apic->lapic_timer;
1954 u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1955 u64 ns = 0;
1956 ktime_t expire;
1957 struct kvm_vcpu *vcpu = apic->vcpu;
1958 u32 this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1959 unsigned long flags;
1960 ktime_t now;
1961
1962 if (unlikely(!tscdeadline || !this_tsc_khz))
1963 return;
1964
1965 local_irq_save(flags);
1966
1967 now = ktime_get();
1968 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1969
1970 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1971 do_div(ns, this_tsc_khz);
1972
1973 if (likely(tscdeadline > guest_tsc) &&
1974 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1975 expire = ktime_add_ns(now, ns);
1976 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1977 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else {
		apic_timer_expired(apic, false);
	}
1980
1981 local_irq_restore(flags);
1982}
1983
1984static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1985{
1986 return (u64)tmict * apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1987 (u64)apic->divide_count;
1988}
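
/*
 * Worked example for the conversion above (illustrative; assumes the
 * default 1 ns APIC bus cycle): with a divide count of 16, an initial
 * count (TMICT) of 100,000 programs a 1.6 ms timer:
 *
 *	ns = 100000ULL * 1 * 16;	// 1,600,000 ns
 */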
1989
1990static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1991{
1992 ktime_t now, remaining;
1993 u64 ns_remaining_old, ns_remaining_new;
1994
1995 apic->lapic_timer.period =
1996 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1997 limit_periodic_timer_frequency(apic);
1998
1999 now = ktime_get();
2000 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
2001 if (ktime_to_ns(remaining) < 0)
2002 remaining = 0;
2003
2004 ns_remaining_old = ktime_to_ns(remaining);
2005 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
2006 apic->divide_count, old_divisor);
2007
2008 apic->lapic_timer.tscdeadline +=
2009 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
2010 nsec_to_cycles(apic->vcpu, ns_remaining_old);
2011 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
2012}
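
/*
 * Rescaling sketch for the above (illustrative values): if 750,000 ns
 * remain and the guest reprograms the divider so that divide_count goes
 * from 16 to 4, the remaining window shrinks proportionally:
 *
 *	ns_remaining_new = mul_u64_u32_div(750000, 4, 16);	// 187,500 ns
 */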
2013
2014static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
2015{
2016 ktime_t now;
2017 u64 tscl = rdtsc();
2018 s64 deadline;
2019
2020 now = ktime_get();
2021 apic->lapic_timer.period =
2022 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2023
2024 if (!apic->lapic_timer.period) {
2025 apic->lapic_timer.tscdeadline = 0;
2026 return false;
2027 }
2028
2029 limit_periodic_timer_frequency(apic);
2030 deadline = apic->lapic_timer.period;
2031
2032 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2033 if (unlikely(count_reg != APIC_TMICT)) {
2034 deadline = tmict_to_ns(apic,
2035 kvm_lapic_get_reg(apic, count_reg));
2036 if (unlikely(deadline <= 0)) {
2037 if (apic_lvtt_period(apic))
2038 deadline = apic->lapic_timer.period;
2039 else
2040 deadline = 0;
			} else if (unlikely(deadline > apic->lapic_timer.period)) {
2043 pr_info_ratelimited(
2044 "vcpu %i: requested lapic timer restore with "
2045 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
2046 "Using initial count to start timer.\n",
2047 apic->vcpu->vcpu_id,
2048 count_reg,
2049 kvm_lapic_get_reg(apic, count_reg),
2050 deadline, apic->lapic_timer.period);
2051 kvm_lapic_set_reg(apic, count_reg, 0);
2052 deadline = apic->lapic_timer.period;
2053 }
2054 }
2055 }
2056
2057 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2058 nsec_to_cycles(apic->vcpu, deadline);
2059 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2060
2061 return true;
2062}
2063
2064static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2065{
2066 ktime_t now = ktime_get();
2067 u64 tscl = rdtsc();
2068 ktime_t delta;
2069
2070 /*
2071 * Synchronize both deadlines to the same time source or
2072 * differences in the periods (caused by differences in the
2073 * underlying clocks or numerical approximation errors) will
2074 * cause the two to drift apart over time as the errors
2075 * accumulate.
2076 */
2077 apic->lapic_timer.target_expiration =
2078 ktime_add_ns(apic->lapic_timer.target_expiration,
2079 apic->lapic_timer.period);
2080 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2081 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2082 nsec_to_cycles(apic->vcpu, delta);
2083}
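
/*
 * Drift sketch for the above (editorial note): adding the ns period to
 * the hrtimer deadline while independently adding a cycle period to the
 * TSC deadline would let rounding errors accumulate.  Re-deriving the
 * TSC deadline from the ktime remainder each period keeps both deadlines
 * pinned to the same time source:
 *
 *	target_expiration += period;				// ns domain
 *	tscdeadline = now_tsc + nsec_to_cycles(vcpu, delta);	// re-derived
 */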
2084
2085static void start_sw_period(struct kvm_lapic *apic)
2086{
2087 if (!apic->lapic_timer.period)
2088 return;
2089
2090 if (ktime_after(ktime_get(),
2091 apic->lapic_timer.target_expiration)) {
2092 apic_timer_expired(apic, false);
2093
2094 if (apic_lvtt_oneshot(apic))
2095 return;
2096
2097 advance_periodic_target_expiration(apic);
2098 }
2099
2100 hrtimer_start(&apic->lapic_timer.timer,
2101 apic->lapic_timer.target_expiration,
2102 HRTIMER_MODE_ABS_HARD);
2103}
2104
2105bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
2106{
2107 if (!lapic_in_kernel(vcpu))
2108 return false;
2109
2110 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2111}
2112
2113static void cancel_hv_timer(struct kvm_lapic *apic)
2114{
2115 WARN_ON(preemptible());
2116 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2117 kvm_x86_call(cancel_hv_timer)(apic->vcpu);
2118 apic->lapic_timer.hv_timer_in_use = false;
2119}
2120
2121static bool start_hv_timer(struct kvm_lapic *apic)
2122{
2123 struct kvm_timer *ktimer = &apic->lapic_timer;
2124 struct kvm_vcpu *vcpu = apic->vcpu;
2125 bool expired;
2126
2127 WARN_ON(preemptible());
2128 if (!kvm_can_use_hv_timer(vcpu))
2129 return false;
2130
2131 if (!ktimer->tscdeadline)
2132 return false;
2133
2134 if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
2135 return false;
2136
2137 ktimer->hv_timer_in_use = true;
2138 hrtimer_cancel(&ktimer->timer);
2139
2140 /*
2141 * To simplify handling the periodic timer, leave the hv timer running
2142 * even if the deadline timer has expired, i.e. rely on the resulting
2143 * VM-Exit to recompute the periodic timer's target expiration.
2144 */
2145 if (!apic_lvtt_period(apic)) {
2146 /*
2147 * Cancel the hv timer if the sw timer fired while the hv timer
2148 * was being programmed, or if the hv timer itself expired.
2149 */
2150 if (atomic_read(&ktimer->pending)) {
2151 cancel_hv_timer(apic);
2152 } else if (expired) {
2153 apic_timer_expired(apic, false);
2154 cancel_hv_timer(apic);
2155 }
2156 }
2157
2158 trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2159
2160 return true;
2161}
2162
2163static void start_sw_timer(struct kvm_lapic *apic)
2164{
2165 struct kvm_timer *ktimer = &apic->lapic_timer;
2166
2167 WARN_ON(preemptible());
2168 if (apic->lapic_timer.hv_timer_in_use)
2169 cancel_hv_timer(apic);
2170 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2171 return;
2172
2173 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2174 start_sw_period(apic);
2175 else if (apic_lvtt_tscdeadline(apic))
2176 start_sw_tscdeadline(apic);
2177 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2178}
2179
2180static void restart_apic_timer(struct kvm_lapic *apic)
2181{
2182 preempt_disable();
2183
2184 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2185 goto out;
2186
2187 if (!start_hv_timer(apic))
2188 start_sw_timer(apic);
2189out:
2190 preempt_enable();
2191}
2192
2193void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
2194{
2195 struct kvm_lapic *apic = vcpu->arch.apic;
2196
2197 preempt_disable();
2198 /* If the preempt notifier has already run, it also called apic_timer_expired */
2199 if (!apic->lapic_timer.hv_timer_in_use)
2200 goto out;
2201 WARN_ON(kvm_vcpu_is_blocking(vcpu));
2202 apic_timer_expired(apic, false);
2203 cancel_hv_timer(apic);
2204
2205 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2206 advance_periodic_target_expiration(apic);
2207 restart_apic_timer(apic);
2208 }
2209out:
2210 preempt_enable();
2211}
2212EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2213
2214void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2215{
2216 restart_apic_timer(vcpu->arch.apic);
2217}
2218
2219void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2220{
2221 struct kvm_lapic *apic = vcpu->arch.apic;
2222
2223 preempt_disable();
2224 /* Possibly the TSC deadline timer is not enabled yet */
2225 if (apic->lapic_timer.hv_timer_in_use)
2226 start_sw_timer(apic);
2227 preempt_enable();
2228}
2229
2230void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2231{
2232 struct kvm_lapic *apic = vcpu->arch.apic;
2233
2234 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2235 restart_apic_timer(apic);
2236}
2237
2238static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2239{
2240 atomic_set(&apic->lapic_timer.pending, 0);
2241
2242 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2243 && !set_target_expiration(apic, count_reg))
2244 return;
2245
2246 restart_apic_timer(apic);
2247}
2248
2249static void start_apic_timer(struct kvm_lapic *apic)
2250{
2251 __start_apic_timer(apic, APIC_TMICT);
2252}
2253
2254static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2255{
2256 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2257
2258 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2259 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode)
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2264 }
2265}
2266
2267static int get_lvt_index(u32 reg)
2268{
2269 if (reg == APIC_LVTCMCI)
2270 return LVT_CMCI;
2271 if (reg < APIC_LVTT || reg > APIC_LVTERR)
2272 return -1;
2273 return array_index_nospec(
2274 (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2275}
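
/*
 * Mapping sketch for the above (illustrative): the LVT registers sit at
 * consecutive 16-byte offsets, so (reg - APIC_LVTT) >> 4 yields the LVT
 * entry index, e.g.:
 *
 *	get_lvt_index(APIC_LVTT)	// -> 0 (timer)
 *	get_lvt_index(APIC_LVT0)	// -> 3 (LINT0)
 *	get_lvt_index(APIC_LVTERR)	// -> 5 (error)
 */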
2276
2277static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2278{
2279 int ret = 0;
2280
2281 trace_kvm_apic_write(reg, val);
2282
2283 switch (reg) {
2284 case APIC_ID: /* Local APIC ID */
2285 if (!apic_x2apic_mode(apic)) {
2286 kvm_apic_set_xapic_id(apic, val >> 24);
2287 } else {
2288 ret = 1;
2289 }
2290 break;
2291
2292 case APIC_TASKPRI:
2293 report_tpr_access(apic, true);
2294 apic_set_tpr(apic, val & 0xff);
2295 break;
2296
2297 case APIC_EOI:
2298 apic_set_eoi(apic);
2299 break;
2300
2301 case APIC_LDR:
2302 if (!apic_x2apic_mode(apic))
2303 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2304 else
2305 ret = 1;
2306 break;
2307
2308 case APIC_DFR:
2309 if (!apic_x2apic_mode(apic))
2310 kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2311 else
2312 ret = 1;
2313 break;
2314
2315 case APIC_SPIV: {
2316 u32 mask = 0x3ff;
2317 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2318 mask |= APIC_SPIV_DIRECTED_EOI;
2319 apic_set_spiv(apic, val & mask);
2320 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2321 int i;
2322
2323 for (i = 0; i < apic->nr_lvt_entries; i++) {
2324 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2325 kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2326 }
2327 apic_update_lvtt(apic);
2328 atomic_set(&apic->lapic_timer.pending, 0);
2329
2330 }
2331 break;
2332 }
2333 case APIC_ICR:
2334 WARN_ON_ONCE(apic_x2apic_mode(apic));
2335
2336 /* No delay here, so we always clear the pending bit */
2337 val &= ~APIC_ICR_BUSY;
2338 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2339 kvm_lapic_set_reg(apic, APIC_ICR, val);
2340 break;
2341 case APIC_ICR2:
2342 if (apic_x2apic_mode(apic))
2343 ret = 1;
2344 else
2345 kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2346 break;
2347
2348 case APIC_LVT0:
2349 apic_manage_nmi_watchdog(apic, val);
2350 fallthrough;
2351 case APIC_LVTTHMR:
2352 case APIC_LVTPC:
2353 case APIC_LVT1:
2354 case APIC_LVTERR:
2355 case APIC_LVTCMCI: {
2356 u32 index = get_lvt_index(reg);
2357 if (!kvm_lapic_lvt_supported(apic, index)) {
2358 ret = 1;
2359 break;
2360 }
2361 if (!kvm_apic_sw_enabled(apic))
2362 val |= APIC_LVT_MASKED;
2363 val &= apic_lvt_mask[index];
2364 kvm_lapic_set_reg(apic, reg, val);
2365 break;
2366 }
2367
2368 case APIC_LVTT:
2369 if (!kvm_apic_sw_enabled(apic))
2370 val |= APIC_LVT_MASKED;
2371 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2372 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2373 apic_update_lvtt(apic);
2374 break;
2375
2376 case APIC_TMICT:
2377 if (apic_lvtt_tscdeadline(apic))
2378 break;
2379
2380 cancel_apic_timer(apic);
2381 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2382 start_apic_timer(apic);
2383 break;
2384
2385 case APIC_TDCR: {
2386 uint32_t old_divisor = apic->divide_count;
2387
2388 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2389 update_divide_count(apic);
2390 if (apic->divide_count != old_divisor &&
2391 apic->lapic_timer.period) {
2392 hrtimer_cancel(&apic->lapic_timer.timer);
2393 update_target_expiration(apic, old_divisor);
2394 restart_apic_timer(apic);
2395 }
2396 break;
2397 }
2398 case APIC_ESR:
2399 if (apic_x2apic_mode(apic) && val != 0)
2400 ret = 1;
2401 break;
2402
2403 case APIC_SELF_IPI:
2404 /*
2405 * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold
2406 * the vector, everything else is reserved.
2407 */
2408 if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2409 ret = 1;
2410 else
2411 kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2412 break;
2413 default:
2414 ret = 1;
2415 break;
2416 }
2417
2418 /*
2419 * Recalculate APIC maps if necessary, e.g. if the software enable bit
2420 * was toggled, the APIC ID changed, etc... The maps are marked dirty
2421 * on relevant changes, i.e. this is a nop for most writes.
2422 */
2423 kvm_recalculate_apic_map(apic->vcpu->kvm);
2424
2425 return ret;
2426}
2427
2428static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2429 gpa_t address, int len, const void *data)
2430{
2431 struct kvm_lapic *apic = to_lapic(this);
2432 unsigned int offset = address - apic->base_address;
2433 u32 val;
2434
2435 if (!apic_mmio_in_range(apic, address))
2436 return -EOPNOTSUPP;
2437
2438 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2439 if (!kvm_check_has_quirk(vcpu->kvm,
2440 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2441 return -EOPNOTSUPP;
2442
2443 return 0;
2444 }
2445
	/*
	 * APIC registers must be aligned on a 128-bit boundary, and 32/64/128
	 * bit registers must be accessed via aligned 32-bit reads and writes.
	 * See SDM section 8.4.1.
	 */
2451 if (len != 4 || (offset & 0xf))
2452 return 0;
2453
2454 val = *(u32*)data;
2455
2456 kvm_lapic_reg_write(apic, offset & 0xff0, val);
2457
2458 return 0;
2459}
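
/*
 * Access-check sketch for the above (illustrative): only aligned 4-byte
 * writes reach kvm_lapic_reg_write(), everything else is silently
 * dropped per the SDM rules quoted above, e.g.:
 *
 *	len == 4, offset == 0x80	// TPR, handled
 *	len == 2, offset == 0x80	// dropped, not a 32-bit access
 *	len == 4, offset == 0x84	// dropped, not 16-byte aligned
 */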
2460
2461void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2462{
2463 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2464}
2465EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2466
2467#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
2468
2469int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
2470{
2471 if (data & X2APIC_ICR_RESERVED_BITS)
2472 return 1;
2473
2474 /*
2475 * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
	 * only AMD requires it to be zero; Intel essentially just ignores the
	 * bit.  And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
2478 * the CPU performs the reserved bits checks, i.e. the underlying CPU
2479 * behavior will "win". Arbitrarily clear the BUSY bit, as there is no
2480 * sane way to provide consistent behavior with respect to hardware.
2481 */
2482 data &= ~APIC_ICR_BUSY;
2483
2484 kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
2485 if (kvm_x86_ops.x2apic_icr_is_split) {
2486 kvm_lapic_set_reg(apic, APIC_ICR, data);
2487 kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
2488 } else {
2489 kvm_lapic_set_reg64(apic, APIC_ICR, data);
2490 }
2491 trace_kvm_apic_write(APIC_ICR, data);
2492 return 0;
2493}
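
/*
 * Storage sketch for the above (illustrative): for an IPI with vector
 * 0xf1 to x2APIC ID 0xfe, i.e. data == 0x000000fe000000f1, the split
 * layout stores the halves separately:
 *
 *	APIC_ICR  = 0x000000f1;		// low 32 bits
 *	APIC_ICR2 = 0x000000fe;		// high 32 bits (destination)
 *
 * whereas the non-split layout stores the full 64-bit value at APIC_ICR.
 */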
2494
2495static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
2496{
2497 if (kvm_x86_ops.x2apic_icr_is_split)
2498 return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
2499 (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
2500
2501 return kvm_lapic_get_reg64(apic, APIC_ICR);
2502}
2503
2504/* emulate APIC access in a trap manner */
2505void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2506{
2507 struct kvm_lapic *apic = vcpu->arch.apic;
2508
2509 /*
	 * ICR is a single 64-bit register when x2APIC is enabled, while all
	 * other registers hold 32-bit values.  For legacy xAPIC, ICR writes
	 * need to go down the common path to get the upper half from ICR2.
	 *
	 * Note, using the write helpers may incur an unnecessary write to the
	 * virtual APIC state, but KVM needs to conditionally modify the value
	 * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
	 * conditional branches is likely a wash relative to the cost of the
	 * maybe-unnecessary write, and both are in the noise anyways.
2519 */
2520 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2521 WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
2522 else
2523 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2524}
2525EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2526
2527void kvm_free_lapic(struct kvm_vcpu *vcpu)
2528{
2529 struct kvm_lapic *apic = vcpu->arch.apic;
2530
2531 if (!vcpu->arch.apic) {
2532 static_branch_dec(&kvm_has_noapic_vcpu);
2533 return;
2534 }
2535
2536 hrtimer_cancel(&apic->lapic_timer.timer);
2537
2538 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2539 static_branch_slow_dec_deferred(&apic_hw_disabled);
2540
2541 if (!apic->sw_enabled)
2542 static_branch_slow_dec_deferred(&apic_sw_disabled);
2543
2544 if (apic->regs)
2545 free_page((unsigned long)apic->regs);
2546
2547 kfree(apic);
2548}
2549
2550/*
2551 *----------------------------------------------------------------------
2552 * LAPIC interface
2553 *----------------------------------------------------------------------
2554 */
2555u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2556{
2557 struct kvm_lapic *apic = vcpu->arch.apic;
2558
2559 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2560 return 0;
2561
2562 return apic->lapic_timer.tscdeadline;
2563}
2564
2565void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2566{
2567 struct kvm_lapic *apic = vcpu->arch.apic;
2568
2569 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2570 return;
2571
2572 hrtimer_cancel(&apic->lapic_timer.timer);
2573 apic->lapic_timer.tscdeadline = data;
2574 start_apic_timer(apic);
2575}
2576
2577void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2578{
2579 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2580}
2581
2582u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2583{
2584 u64 tpr;
2585
2586 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2587
2588 return (tpr & 0xf0) >> 4;
2589}
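
/*
 * TPR <-> CR8 sketch (illustrative): CR8 is the high nibble of the 8-bit
 * TPR, so the two helpers above round-trip whenever TPR's low nibble is
 * zero:
 *
 *	kvm_lapic_set_tpr(vcpu, 0x9);	// TPR = 0x90
 *	kvm_lapic_get_cr8(vcpu);	// -> 0x9
 */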
2590
2591static void __kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value)
2592{
2593 u64 old_value = vcpu->arch.apic_base;
2594 struct kvm_lapic *apic = vcpu->arch.apic;
2595
2596 vcpu->arch.apic_base = value;
2597
2598 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2599 kvm_update_cpuid_runtime(vcpu);
2600
2601 if (!apic)
2602 return;
2603
2604 /* update jump label if enable bit changes */
2605 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2606 if (value & MSR_IA32_APICBASE_ENABLE) {
2607 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2608 static_branch_slow_dec_deferred(&apic_hw_disabled);
2609 /* Check if there are APF page ready requests pending */
2610 kvm_make_request(KVM_REQ_APF_READY, vcpu);
2611 } else {
2612 static_branch_inc(&apic_hw_disabled.key);
2613 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2614 }
2615 }
2616
2617 if ((old_value ^ value) & X2APIC_ENABLE) {
2618 if (value & X2APIC_ENABLE)
2619 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2620 else if (value & MSR_IA32_APICBASE_ENABLE)
2621 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2622 }
2623
2624 if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
2625 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2626 kvm_x86_call(set_virtual_apic_mode)(vcpu);
2627 }
2628
2629 apic->base_address = apic->vcpu->arch.apic_base &
2630 MSR_IA32_APICBASE_BASE;
2631
2632 if ((value & MSR_IA32_APICBASE_ENABLE) &&
2633 apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2634 kvm_set_apicv_inhibit(apic->vcpu->kvm,
2635 APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2636 }
2637}
2638
2639int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated)
2640{
2641 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
2642 enum lapic_mode new_mode = kvm_apic_mode(value);
2643
2644 if (vcpu->arch.apic_base == value)
2645 return 0;
2646
2647 u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
2648 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
2649
2650 if ((value & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
2651 return 1;
2652 if (!host_initiated) {
2653 if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
2654 return 1;
2655 if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
2656 return 1;
2657 }
2658
2659 __kvm_apic_set_base(vcpu, value);
2660 kvm_recalculate_apic_map(vcpu->kvm);
2661 return 0;
2662}
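
/*
 * Validity sketch for the reserved-bit check above (illustrative): on
 * top of the physical-address bits above the base, the mask rejects:
 *
 *	0x2ff		// bits 9 and 7:0, always reserved
 *	X2APIC_ENABLE	// bit 10, reserved without X86_FEATURE_X2APIC
 *
 * leaving BSP (bit 8), EXTD (bit 10, when advertised) and EN (bit 11)
 * as the settable control bits.
 */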
2663
2664void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2665{
2666 struct kvm_lapic *apic = vcpu->arch.apic;
2667
2668 /*
2669 * When APICv is enabled, KVM must always search the IRR for a pending
2670 * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
2671 * isn't running. If APICv is disabled, KVM _should_ search the IRR
2672 * for a pending IRQ. But KVM currently doesn't ensure *all* hardware,
2673 * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
2674 * the IRR at this time could race with IRQ delivery from hardware that
2675 * still sees APICv as being enabled.
2676 *
2677 * FIXME: Ensure other vCPUs and devices observe the change in APICv
2678 * state prior to updating KVM's metadata caches, so that KVM
2679 * can safely search the IRR and set irr_pending accordingly.
2680 */
2681 apic->irr_pending = true;
2682
2683 if (apic->apicv_active)
2684 apic->isr_count = 1;
2685 else
2686 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2687
2688 apic->highest_isr_cache = -1;
2689}
2690
2691int kvm_alloc_apic_access_page(struct kvm *kvm)
2692{
2693 void __user *hva;
2694 int ret = 0;
2695
2696 mutex_lock(&kvm->slots_lock);
2697 if (kvm->arch.apic_access_memslot_enabled ||
2698 kvm->arch.apic_access_memslot_inhibited)
2699 goto out;
2700
2701 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2702 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
2703 if (IS_ERR(hva)) {
2704 ret = PTR_ERR(hva);
2705 goto out;
2706 }
2707
2708 kvm->arch.apic_access_memslot_enabled = true;
2709out:
2710 mutex_unlock(&kvm->slots_lock);
2711 return ret;
2712}
2713EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
2714
2715void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
2716{
2717 struct kvm *kvm = vcpu->kvm;
2718
2719 if (!kvm->arch.apic_access_memslot_enabled)
2720 return;
2721
2722 kvm_vcpu_srcu_read_unlock(vcpu);
2723
2724 mutex_lock(&kvm->slots_lock);
2725
2726 if (kvm->arch.apic_access_memslot_enabled) {
2727 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2728 /*
2729 * Clear "enabled" after the memslot is deleted so that a
2730 * different vCPU doesn't get a false negative when checking
2731 * the flag out of slots_lock. No additional memory barrier is
		 * needed as modifying memslots requires waiting for other vCPUs
		 * to drop SRCU (see above), and false positives are ok as the
2734 * flag is rechecked after acquiring slots_lock.
2735 */
2736 kvm->arch.apic_access_memslot_enabled = false;
2737
2738 /*
2739 * Mark the memslot as inhibited to prevent reallocating the
2740 * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
2741 */
2742 kvm->arch.apic_access_memslot_inhibited = true;
2743 }
2744
2745 mutex_unlock(&kvm->slots_lock);
2746
2747 kvm_vcpu_srcu_read_lock(vcpu);
2748}
2749
2750void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2751{
2752 struct kvm_lapic *apic = vcpu->arch.apic;
2753 u64 msr_val;
2754 int i;
2755
2756 kvm_x86_call(apicv_pre_state_restore)(vcpu);
2757
2758 if (!init_event) {
2759 msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2760 if (kvm_vcpu_is_reset_bsp(vcpu))
2761 msr_val |= MSR_IA32_APICBASE_BSP;
2762
2763 /*
		 * Use the inner helper to avoid an extra recalculation of the
2765 * optimized APIC map if some other task has dirtied the map.
2766 * The recalculation needed for this vCPU will be done after
2767 * all APIC state has been initialized (see below).
2768 */
2769 __kvm_apic_set_base(vcpu, msr_val);
2770 }
2771
2772 if (!apic)
2773 return;
2774
2775 /* Stop the timer in case it's a reset to an active apic */
2776 hrtimer_cancel(&apic->lapic_timer.timer);
2777
2778 /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2779 if (!init_event)
2780 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2781 kvm_apic_set_version(apic->vcpu);
2782
2783 for (i = 0; i < apic->nr_lvt_entries; i++)
2784 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2785 apic_update_lvtt(apic);
2786 if (kvm_vcpu_is_reset_bsp(vcpu) &&
2787 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2788 kvm_lapic_set_reg(apic, APIC_LVT0,
2789 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2790 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2791
2792 kvm_apic_set_dfr(apic, 0xffffffffU);
2793 apic_set_spiv(apic, 0xff);
2794 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2795 if (!apic_x2apic_mode(apic))
2796 kvm_apic_set_ldr(apic, 0);
2797 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2798 if (!apic_x2apic_mode(apic)) {
2799 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2800 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2801 } else {
2802 kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2803 }
2804 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2805 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2806 for (i = 0; i < 8; i++) {
2807 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2808 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2809 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2810 }
2811 kvm_apic_update_apicv(vcpu);
2812 update_divide_count(apic);
2813 atomic_set(&apic->lapic_timer.pending, 0);
2814
2815 vcpu->arch.pv_eoi.msr_val = 0;
2816 apic_update_ppr(apic);
2817 if (apic->apicv_active) {
2818 kvm_x86_call(apicv_post_state_restore)(vcpu);
2819 kvm_x86_call(hwapic_irr_update)(vcpu, -1);
2820 kvm_x86_call(hwapic_isr_update)(vcpu, -1);
2821 }
2822
2823 vcpu->arch.apic_arb_prio = 0;
2824 vcpu->arch.apic_attention = 0;
2825
2826 kvm_recalculate_apic_map(vcpu->kvm);
2827}
2828
2829/*
2830 *----------------------------------------------------------------------
2831 * timer interface
2832 *----------------------------------------------------------------------
2833 */
2834
2835static bool lapic_is_periodic(struct kvm_lapic *apic)
2836{
2837 return apic_lvtt_period(apic);
2838}
2839
2840int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2841{
2842 struct kvm_lapic *apic = vcpu->arch.apic;
2843
2844 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2845 return atomic_read(&apic->lapic_timer.pending);
2846
2847 return 0;
2848}
2849
2850int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2851{
2852 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2853 int vector, mode, trig_mode;
2854 int r;
2855
2856 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2857 vector = reg & APIC_VECTOR_MASK;
2858 mode = reg & APIC_MODE_MASK;
2859 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2860
2861 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2862 if (r && lvt_type == APIC_LVTPC &&
2863 guest_cpuid_is_intel_compatible(apic->vcpu))
2864 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2865 return r;
2866 }
2867 return 0;
2868}
2869
2870void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2871{
2872 struct kvm_lapic *apic = vcpu->arch.apic;
2873
2874 if (apic)
2875 kvm_apic_local_deliver(apic, APIC_LVT0);
2876}
2877
2878static const struct kvm_io_device_ops apic_mmio_ops = {
2879 .read = apic_mmio_read,
2880 .write = apic_mmio_write,
2881};
2882
2883static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2884{
2885 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2886 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2887
2888 apic_timer_expired(apic, true);
2889
2890 if (lapic_is_periodic(apic)) {
2891 advance_periodic_target_expiration(apic);
2892 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2893 return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
2896}
2897
2898int kvm_create_lapic(struct kvm_vcpu *vcpu)
2899{
2900 struct kvm_lapic *apic;
2901
2902 ASSERT(vcpu != NULL);
2903
2904 if (!irqchip_in_kernel(vcpu->kvm)) {
2905 static_branch_inc(&kvm_has_noapic_vcpu);
2906 return 0;
2907 }
2908
2909 apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2910 if (!apic)
2911 goto nomem;
2912
2913 vcpu->arch.apic = apic;
2914
2915 if (kvm_x86_ops.alloc_apic_backing_page)
2916 apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
2917 else
2918 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2919 if (!apic->regs) {
2920 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2921 vcpu->vcpu_id);
2922 goto nomem_free_apic;
2923 }
2924 apic->vcpu = vcpu;
2925
2926 apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2927
2928 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2929 HRTIMER_MODE_ABS_HARD);
2930 apic->lapic_timer.timer.function = apic_timer_fn;
2931 if (lapic_timer_advance)
2932 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2933
2934 /*
2935 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2936 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2937 */
2938 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2939 static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2940 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2941
2942 /*
2943 * Defer evaluating inhibits until the vCPU is first run, as this vCPU
2944 * will not get notified of any changes until this vCPU is visible to
2945 * other vCPUs (marked online and added to the set of vCPUs).
2946 *
	 * Opportunistically mark APICv active as VMX in particular is highly
2948 * unlikely to have inhibits. Ignore the current per-VM APICv state so
2949 * that vCPU creation is guaranteed to run with a deterministic value,
2950 * the request will ensure the vCPU gets the correct state before VM-Entry.
2951 */
2952 if (enable_apicv) {
2953 apic->apicv_active = true;
2954 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2955 }
2956
2957 return 0;
2958nomem_free_apic:
2959 kfree(apic);
2960 vcpu->arch.apic = NULL;
2961nomem:
2962 return -ENOMEM;
2963}
2964
2965int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2966{
2967 struct kvm_lapic *apic = vcpu->arch.apic;
2968 u32 ppr;
2969
2970 if (!kvm_apic_present(vcpu))
2971 return -1;
2972
2973 __apic_update_ppr(apic, &ppr);
2974 return apic_has_interrupt_for_ppr(apic, ppr);
2975}
2976EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2977
2978int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2979{
2980 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2981
2982 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2983 return 1;
2984 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2985 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2986 return 1;
2987 return 0;
2988}
2989
2990void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2991{
2992 struct kvm_lapic *apic = vcpu->arch.apic;
2993
2994 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2995 kvm_apic_inject_pending_timer_irqs(apic);
2996 atomic_set(&apic->lapic_timer.pending, 0);
2997 }
2998}
2999
3000void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
3001{
3002 struct kvm_lapic *apic = vcpu->arch.apic;
3003 u32 ppr;
3004
3005 if (WARN_ON_ONCE(vector < 0 || !apic))
3006 return;
3007
3008 /*
3009 * We get here even with APIC virtualization enabled, if doing
3010 * nested virtualization and L1 runs with the "acknowledge interrupt
3011 * on exit" mode. Then we cannot inject the interrupt via RVI,
3012 * because the process would deliver it through the IDT.
3013 */
3014
3015 apic_clear_irr(vector, apic);
3016 if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
3017 /*
3018 * For auto-EOI interrupts, there might be another pending
3019 * interrupt above PPR, so check whether to raise another
3020 * KVM_REQ_EVENT.
3021 */
3022 apic_update_ppr(apic);
3023 } else {
3024 /*
3025 * For normal interrupts, PPR has been raised and there cannot
3026 * be a higher-priority pending interrupt---except if there was
3027 * a concurrent interrupt injection, but that would have
3028 * triggered KVM_REQ_EVENT already.
3029 */
3030 apic_set_isr(vector, apic);
3031 __apic_update_ppr(apic, &ppr);
3032 }
3033
3034}
3035EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
3036
3037static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
3038 struct kvm_lapic_state *s, bool set)
3039{
3040 if (apic_x2apic_mode(vcpu->arch.apic)) {
3041 u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic);
3042 u32 *id = (u32 *)(s->regs + APIC_ID);
3043 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
3044 u64 icr;
3045
3046 if (vcpu->kvm->arch.x2apic_format) {
3047 if (*id != x2apic_id)
3048 return -EINVAL;
3049 } else {
3050 /*
3051 * Ignore the userspace value when setting APIC state.
3052 * KVM's model is that the x2APIC ID is readonly, e.g.
3053 * KVM only supports delivering interrupts to KVM's
3054 * version of the x2APIC ID. However, for backwards
3055 * compatibility, don't reject attempts to set a
3056 * mismatched ID for userspace that hasn't opted into
3057 * x2apic_format.
3058 */
3059 if (set)
3060 *id = x2apic_id;
3061 else
3062 *id = x2apic_id << 24;
3063 }
3064
3065 /*
3066 * In x2APIC mode, the LDR is fixed and based on the id. And
3067 * if the ICR is _not_ split, ICR is internally a single 64-bit
3068 * register, but needs to be split to ICR+ICR2 in userspace for
3069 * backwards compatibility.
3070 */
3071 if (set)
3072 *ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
3073
3074 if (!kvm_x86_ops.x2apic_icr_is_split) {
3075 if (set) {
3076 icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
3077 (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
3078 __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
3079 } else {
3080 icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
3081 __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
3082 }
3083 }
3084 }
3085
3086 return 0;
3087}
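
/*
 * ID-format sketch for the fixup above (illustrative): for vcpu_id 5
 * without x2apic_format, the register image is patched in both
 * directions:
 *
 *	set: *id = 5;		// full 32-bit x2APIC ID
 *	get: *id = 5 << 24;	// xAPIC-style ID in bits 31:24
 */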
3088
3089int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3090{
3091 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
3092
3093 /*
3094 * Get calculated timer current count for remaining timer period (if
3095 * any) and store it in the returned register set.
3096 */
3097 __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
3098 __apic_read(vcpu->arch.apic, APIC_TMCCT));
3099
3100 return kvm_apic_state_fixup(vcpu, s, false);
3101}
3102
3103int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3104{
3105 struct kvm_lapic *apic = vcpu->arch.apic;
3106 int r;
3107
3108 kvm_x86_call(apicv_pre_state_restore)(vcpu);
3109
3110 /* set SPIV separately to get count of SW disabled APICs right */
3111 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
3112
3113 r = kvm_apic_state_fixup(vcpu, s, true);
3114 if (r) {
3115 kvm_recalculate_apic_map(vcpu->kvm);
3116 return r;
3117 }
3118 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3119
3120 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3121 kvm_recalculate_apic_map(vcpu->kvm);
3122 kvm_apic_set_version(vcpu);
3123
3124 apic_update_ppr(apic);
3125 cancel_apic_timer(apic);
3126 apic->lapic_timer.expired_tscdeadline = 0;
3127 apic_update_lvtt(apic);
3128 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3129 update_divide_count(apic);
3130 __start_apic_timer(apic, APIC_TMCCT);
3131 kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3132 kvm_apic_update_apicv(vcpu);
3133 if (apic->apicv_active) {
3134 kvm_x86_call(apicv_post_state_restore)(vcpu);
3135 kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
3136 kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
3137 }
3138 kvm_make_request(KVM_REQ_EVENT, vcpu);
3139 if (ioapic_in_kernel(vcpu->kvm))
3140 kvm_rtc_eoi_tracking_restore_one(vcpu);
3141
3142 vcpu->arch.apic_arb_prio = 0;
3143
3144 return 0;
3145}
3146
3147void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
3148{
3149 struct hrtimer *timer;
3150
3151 if (!lapic_in_kernel(vcpu) ||
3152 kvm_can_post_timer_interrupt(vcpu))
3153 return;
3154
3155 timer = &vcpu->arch.apic->lapic_timer.timer;
3156 if (hrtimer_cancel(timer))
3157 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
3158}
3159
3160/*
3161 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
3162 *
3163 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
3165 * Clear PV EOI in guest memory in any case.
3166 */
3167static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
3168 struct kvm_lapic *apic)
3169{
3170 int vector;
3171 /*
3172 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
3173 * and KVM_PV_EOI_ENABLED in guest memory as follows:
3174 *
3175 * KVM_APIC_PV_EOI_PENDING is unset:
3176 * -> host disabled PV EOI.
3177 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
3178 * -> host enabled PV EOI, guest did not execute EOI yet.
3179 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
3180 * -> host enabled PV EOI, guest executed EOI.
3181 */
3182 BUG_ON(!pv_eoi_enabled(vcpu));
3183
3184 if (pv_eoi_test_and_clr_pending(vcpu))
3185 return;
3186 vector = apic_set_eoi(apic);
3187 trace_kvm_pv_eoi(apic, vector);
3188}
3189
3190void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
3191{
3192 u32 data;
3193
3194 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
3195 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3196
3197 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3198 return;
3199
3200 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3201 sizeof(u32)))
3202 return;
3203
3204 apic_set_tpr(vcpu->arch.apic, data & 0xff);
3205}
3206
3207/*
3208 * apic_sync_pv_eoi_to_guest - called before vmentry
3209 *
3210 * Detect whether it's safe to enable PV EOI and
3211 * if yes do so.
3212 */
3213static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
3214 struct kvm_lapic *apic)
3215{
3216 if (!pv_eoi_enabled(vcpu) ||
3217 /* IRR set or many bits in ISR: could be nested. */
3218 apic->irr_pending ||
3219 /* Cache not set: could be safe but we don't bother. */
3220 apic->highest_isr_cache == -1 ||
3221 /* Need EOI to update ioapic. */
3222 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3223 /*
3224 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
3225 * so we need not do anything here.
3226 */
3227 return;
3228 }
3229
3230 pv_eoi_set_pending(apic->vcpu);
3231}
3232
3233void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
3234{
3235 u32 data, tpr;
3236 int max_irr, max_isr;
3237 struct kvm_lapic *apic = vcpu->arch.apic;
3238
3239 apic_sync_pv_eoi_to_guest(vcpu, apic);
3240
3241 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3242 return;
3243
3244 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3245 max_irr = apic_find_highest_irr(apic);
3246 if (max_irr < 0)
3247 max_irr = 0;
3248 max_isr = apic_find_highest_isr(apic);
3249 if (max_isr < 0)
3250 max_isr = 0;
3251 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
3252
3253 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3254 sizeof(u32));
3255}
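
/*
 * Packing sketch for the vAPIC word above (illustrative): the 32-bit
 * value combines TPR, ISR and IRR as
 *
 *	bits  7:0	// TPR
 *	bits 15:8	// highest in-service vector & 0xf0
 *	bits 31:24	// highest pending (IRR) vector
 *
 * e.g. tpr == 0x20, max_isr == 0x31, max_irr == 0x52 yields 0x52003020.
 */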
3256
3257int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
3258{
3259 if (vapic_addr) {
3260 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
3261 &vcpu->arch.apic->vapic_cache,
3262 vapic_addr, sizeof(u32)))
3263 return -EINVAL;
3264 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3265 } else {
3266 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3267 }
3268
3269 vcpu->arch.apic->vapic_addr = vapic_addr;
3270 return 0;
3271}
3272
3273static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
3274{
3275 u32 low;
3276
3277 if (reg == APIC_ICR) {
3278 *data = kvm_x2apic_icr_read(apic);
3279 return 0;
3280 }
3281
3282 if (kvm_lapic_reg_read(apic, reg, 4, &low))
3283 return 1;
3284
3285 *data = low;
3286
3287 return 0;
3288}
3289
3290static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
3291{
3292 /*
3293 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such; all other registers remain accessible only
3295 * through 32-bit reads/writes.
3296 */
3297 if (reg == APIC_ICR)
3298 return kvm_x2apic_icr_write(apic, data);
3299
3300 /* Bits 63:32 are reserved in all other registers. */
3301 if (data >> 32)
3302 return 1;
3303
3304 return kvm_lapic_reg_write(apic, reg, (u32)data);
3305}
3306
3307int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
3308{
3309 struct kvm_lapic *apic = vcpu->arch.apic;
3310 u32 reg = (msr - APIC_BASE_MSR) << 4;
3311
3312 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3313 return 1;
3314
3315 return kvm_lapic_msr_write(apic, reg, data);
3316}
3317
3318int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
3319{
3320 struct kvm_lapic *apic = vcpu->arch.apic;
3321 u32 reg = (msr - APIC_BASE_MSR) << 4;
3322
3323 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3324 return 1;
3325
3326 return kvm_lapic_msr_read(apic, reg, data);
3327}
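
/*
 * MSR-to-offset sketch for the two helpers above (illustrative): x2APIC
 * MSRs start at APIC_BASE_MSR (0x800) and map to APIC register offsets
 * via a 4-bit shift, e.g.:
 *
 *	0x808: (0x808 - 0x800) << 4 == 0x080	// APIC_TASKPRI
 *	0x830: (0x830 - 0x800) << 4 == 0x300	// APIC_ICR
 */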
3328
3329int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
3330{
3331 if (!lapic_in_kernel(vcpu))
3332 return 1;
3333
3334 return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3335}
3336
3337int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
3338{
3339 if (!lapic_in_kernel(vcpu))
3340 return 1;
3341
3342 return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3343}
3344
3345int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
3346{
3347 u64 addr = data & ~KVM_MSR_ENABLED;
3348 struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
3349 unsigned long new_len;
3350 int ret;
3351
3352 if (!IS_ALIGNED(addr, 4))
3353 return 1;
3354
3355 if (data & KVM_MSR_ENABLED) {
3356 if (addr == ghc->gpa && len <= ghc->len)
3357 new_len = ghc->len;
3358 else
3359 new_len = len;
3360
3361 ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
3362 if (ret)
3363 return ret;
3364 }
3365
3366 vcpu->arch.pv_eoi.msr_val = data;
3367
3368 return 0;
3369}
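
/*
 * MSR-layout sketch for the above (illustrative): bit 0 (KVM_MSR_ENABLED)
 * arms PV EOI and the remaining bits carry the GPA of the flag word,
 * which must be 4-byte aligned:
 *
 *	data = 0x1000 | KVM_MSR_ENABLED;	// enable, cache GPA 0x1000
 *	data = 0x1002 | KVM_MSR_ENABLED;	// rejected, !IS_ALIGNED(addr, 4)
 */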
3370
3371int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
3372{
3373 struct kvm_lapic *apic = vcpu->arch.apic;
3374 u8 sipi_vector;
3375 int r;
3376
3377 if (!kvm_apic_has_pending_init_or_sipi(vcpu))
3378 return 0;
3379
3380 if (is_guest_mode(vcpu)) {
3381 r = kvm_check_nested_events(vcpu);
3382 if (r < 0)
3383 return r == -EBUSY ? 0 : r;
3384 /*
3385 * Continue processing INIT/SIPI even if a nested VM-Exit
3386 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
3387 * are blocked as a result of transitioning to VMX root mode.
3388 */
3389 }
3390
3391 /*
3392 * INITs are blocked while CPU is in specific states (SMM, VMX root
3393 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
3394 * wait-for-SIPI (WFS).
3395 */
3396 if (!kvm_apic_init_sipi_allowed(vcpu)) {
3397 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
3398 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3399 return 0;
3400 }
3401
3402 if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3403 kvm_vcpu_reset(vcpu, true);
3404 if (kvm_vcpu_is_bsp(apic->vcpu))
3405 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3406 else
3407 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3408 }
3409 if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3410 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
3411 /* evaluate pending_events before reading the vector */
3412 smp_rmb();
3413 sipi_vector = apic->sipi_vector;
3414 kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
3415 sipi_vector);
3416 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3417 }
3418 }
3419 return 0;
3420}
3421
3422void kvm_lapic_exit(void)
3423{
3424 static_key_deferred_flush(&apic_hw_disabled);
3425 WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
3426 static_key_deferred_flush(&apic_sw_disabled);
3427 WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
3428}
1// SPDX-License-Identifier: GPL-2.0-only
2
3/*
4 * Local APIC virtualization
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright (C) 2007 Novell
8 * Copyright (C) 2007 Intel
9 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Dor Laor <dor.laor@qumranet.com>
13 * Gregory Haskins <ghaskins@novell.com>
14 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
15 *
16 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
17 */
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/kvm_host.h>
21#include <linux/kvm.h>
22#include <linux/mm.h>
23#include <linux/highmem.h>
24#include <linux/smp.h>
25#include <linux/hrtimer.h>
26#include <linux/io.h>
27#include <linux/export.h>
28#include <linux/math64.h>
29#include <linux/slab.h>
30#include <asm/processor.h>
31#include <asm/mce.h>
32#include <asm/msr.h>
33#include <asm/page.h>
34#include <asm/current.h>
35#include <asm/apicdef.h>
36#include <asm/delay.h>
37#include <linux/atomic.h>
38#include <linux/jump_label.h>
39#include "kvm_cache_regs.h"
40#include "irq.h"
41#include "ioapic.h"
42#include "trace.h"
43#include "x86.h"
44#include "xen.h"
45#include "cpuid.h"
46#include "hyperv.h"
47#include "smm.h"
48
49#ifndef CONFIG_X86_64
50#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
51#else
52#define mod_64(x, y) ((x) % (y))
53#endif
54
55/* 14 is the version for Xeon and Pentium 8.4.8*/
56#define APIC_VERSION 0x14UL
57#define LAPIC_MMIO_LENGTH (1 << 12)
58/* followed define is not in apicdef.h */
59#define MAX_APIC_VECTOR 256
60#define APIC_VECTORS_PER_REG 32
61
62static bool lapic_timer_advance_dynamic __read_mostly;
63#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
64#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
65#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
66#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
67/* step-by-step approximation to mitigate fluctuation */
68#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
69static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
70static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
71
72static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
73{
74 *((u32 *) (regs + reg_off)) = val;
75}
76
77static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
78{
79 __kvm_lapic_set_reg(apic->regs, reg_off, val);
80}
81
82static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
83{
84 BUILD_BUG_ON(reg != APIC_ICR);
85 return *((u64 *) (regs + reg));
86}
87
88static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
89{
90 return __kvm_lapic_get_reg64(apic->regs, reg);
91}
92
93static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
94{
95 BUILD_BUG_ON(reg != APIC_ICR);
96 *((u64 *) (regs + reg)) = val;
97}
98
99static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
100 int reg, u64 val)
101{
102 __kvm_lapic_set_reg64(apic->regs, reg, val);
103}
104
105static inline int apic_test_vector(int vec, void *bitmap)
106{
107 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
108}
109
110bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
111{
112 struct kvm_lapic *apic = vcpu->arch.apic;
113
114 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
115 apic_test_vector(vector, apic->regs + APIC_IRR);
116}
117
118static inline int __apic_test_and_set_vector(int vec, void *bitmap)
119{
120 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
121}
122
123static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
124{
125 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
126}
127
128__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
129EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
130
131__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
132__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
133
134static inline int apic_enabled(struct kvm_lapic *apic)
135{
136 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
137}
138
139#define LVT_MASK \
140 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
141
142#define LINT_MASK \
143 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
144 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
145
146static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
147{
148 return apic->vcpu->vcpu_id;
149}
150
151static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
152{
153 return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
154 (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
155}
156
157bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
158{
159 return kvm_x86_ops.set_hv_timer
160 && !(kvm_mwait_in_guest(vcpu->kvm) ||
161 kvm_can_post_timer_interrupt(vcpu));
162}
163
164static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
165{
166 return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
167}
168
169static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
170{
171 return ((id >> 4) << 16) | (1 << (id & 0xf));
172}
173
174static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
175 u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
176 switch (map->logical_mode) {
177 case KVM_APIC_MODE_SW_DISABLED:
178 /* Arbitrarily use the flat map so that @cluster isn't NULL. */
179 *cluster = map->xapic_flat_map;
180 *mask = 0;
181 return true;
182 case KVM_APIC_MODE_X2APIC: {
183 u32 offset = (dest_id >> 16) * 16;
184 u32 max_apic_id = map->max_apic_id;
185
186 if (offset <= max_apic_id) {
187 u8 cluster_size = min(max_apic_id - offset + 1, 16U);
188
189 offset = array_index_nospec(offset, map->max_apic_id + 1);
190 *cluster = &map->phys_map[offset];
191 *mask = dest_id & (0xffff >> (16 - cluster_size));
192 } else {
193 *mask = 0;
194 }
195
196 return true;
197 }
198 case KVM_APIC_MODE_XAPIC_FLAT:
199 *cluster = map->xapic_flat_map;
200 *mask = dest_id & 0xff;
201 return true;
202 case KVM_APIC_MODE_XAPIC_CLUSTER:
203 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
204 *mask = dest_id & 0xf;
205 return true;
206 case KVM_APIC_MODE_MAP_DISABLED:
207 return false;
208 default:
209 WARN_ON_ONCE(1);
210 return false;
211 }
212}
213
214static void kvm_apic_map_free(struct rcu_head *rcu)
215{
216 struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
217
218 kvfree(map);
219}
220
221static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
222 struct kvm_vcpu *vcpu,
223 bool *xapic_id_mismatch)
224{
225 struct kvm_lapic *apic = vcpu->arch.apic;
226 u32 x2apic_id = kvm_x2apic_id(apic);
227 u32 xapic_id = kvm_xapic_id(apic);
228 u32 physical_id;
229
230 /*
231 * For simplicity, KVM always allocates enough space for all possible
232 * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
233 * without the optimized map.
234 */
235 if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
236 return -EINVAL;
237
238 /*
239 * Bail if a vCPU was added and/or enabled its APIC between allocating
240 * the map and doing the actual calculations for the map. Note, KVM
241 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
242 * the compiler decides to reload x2apic_id after this check.
243 */
244 if (x2apic_id > new->max_apic_id)
245 return -E2BIG;
246
247 /*
248 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
249 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
250 * 32-bit value. Any unwanted aliasing due to truncation results will
251 * be detected below.
252 */
253 if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
254 *xapic_id_mismatch = true;
255
256 /*
257 * Apply KVM's hotplug hack if userspace has enable 32-bit APIC IDs.
258 * Allow sending events to vCPUs by their x2APIC ID even if the target
259 * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
260 * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
261 * and collide).
262 *
263 * Honor the architectural (and KVM's non-optimized) behavior if
264 * userspace has not enabled 32-bit x2APIC IDs. Each APIC is supposed
265 * to process messages independently. If multiple vCPUs have the same
266 * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
267 * manually modified its xAPIC IDs, events targeting that ID are
268 * supposed to be recognized by all vCPUs with said ID.
269 */
270 if (vcpu->kvm->arch.x2apic_format) {
271 /* See also kvm_apic_match_physical_addr(). */
272 if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
273 new->phys_map[x2apic_id] = apic;
274
275 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
276 new->phys_map[xapic_id] = apic;
277 } else {
278 /*
279 * Disable the optimized map if the physical APIC ID is already
280 * mapped, i.e. is aliased to multiple vCPUs. The optimized
281 * map requires a strict 1:1 mapping between IDs and vCPUs.
282 */
283 if (apic_x2apic_mode(apic))
284 physical_id = x2apic_id;
285 else
286 physical_id = xapic_id;
287
288 if (new->phys_map[physical_id])
289 return -EINVAL;
290
291 new->phys_map[physical_id] = apic;
292 }
293
294 return 0;
295}
296
297static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
298 struct kvm_vcpu *vcpu)
299{
300 struct kvm_lapic *apic = vcpu->arch.apic;
301 enum kvm_apic_logical_mode logical_mode;
302 struct kvm_lapic **cluster;
303 u16 mask;
304 u32 ldr;
305
306 if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
307 return;
308
309 if (!kvm_apic_sw_enabled(apic))
310 return;
311
312 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
313 if (!ldr)
314 return;
315
316 if (apic_x2apic_mode(apic)) {
317 logical_mode = KVM_APIC_MODE_X2APIC;
318 } else {
319 ldr = GET_APIC_LOGICAL_ID(ldr);
320 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
321 logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
322 else
323 logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
324 }
325
326 /*
327 * To optimize logical mode delivery, all software-enabled APICs must
328 * be configured for the same mode.
329 */
330 if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
331 new->logical_mode = logical_mode;
332 } else if (new->logical_mode != logical_mode) {
333 new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
334 return;
335 }
336
337 /*
338 * In x2APIC mode, the LDR is read-only and derived directly from the
339 * x2APIC ID, thus is guaranteed to be addressable. KVM reuses
340 * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
341 * reversing the LDR calculation to get cluster of APICs, i.e. no
342 * additional work is required.
343 */
344 if (apic_x2apic_mode(apic)) {
345 WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
346 return;
347 }
348
349 if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
350 &cluster, &mask))) {
351 new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
352 return;
353 }
354
355 if (!mask)
356 return;
357
358 ldr = ffs(mask) - 1;
359 if (!is_power_of_2(mask) || cluster[ldr])
360 new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
361 else
362 cluster[ldr] = apic;
363}
364
365/*
366 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
367 *
368 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
369 * apic_map_lock_held.
370 */
371enum {
372 CLEAN,
373 UPDATE_IN_PROGRESS,
374 DIRTY
375};
376
377void kvm_recalculate_apic_map(struct kvm *kvm)
378{
379 struct kvm_apic_map *new, *old = NULL;
380 struct kvm_vcpu *vcpu;
381 unsigned long i;
382 u32 max_id = 255; /* enough space for any xAPIC ID */
383 bool xapic_id_mismatch;
384 int r;
385
386 /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
387 if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
388 return;
389
390 WARN_ONCE(!irqchip_in_kernel(kvm),
391 "Dirty APIC map without an in-kernel local APIC");
392
393 mutex_lock(&kvm->arch.apic_map_lock);
394
395retry:
396 /*
397 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
398 * or the APIC registers (if dirty). Note, on retry the map may have
399 * not yet been marked dirty by whatever task changed a vCPU's x2APIC
400 * ID, i.e. the map may still show up as in-progress. In that case
401 * this task still needs to retry and complete its calculation.
402 */
403 if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
404 DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
405 /* Someone else has updated the map. */
406 mutex_unlock(&kvm->arch.apic_map_lock);
407 return;
408 }
409
410 /*
411 * Reset the mismatch flag between attempts so that KVM does the right
412 * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
413 * keep max_id strictly increasing. Disallowing max_id from shrinking
414 * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
415 * with the highest x2APIC ID is toggling its APIC on and off.
416 */
417 xapic_id_mismatch = false;
418
419 kvm_for_each_vcpu(i, vcpu, kvm)
420 if (kvm_apic_present(vcpu))
421 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
422
423 new = kvzalloc(sizeof(struct kvm_apic_map) +
424 sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
425 GFP_KERNEL_ACCOUNT);
426
427 if (!new)
428 goto out;
429
430 new->max_apic_id = max_id;
431 new->logical_mode = KVM_APIC_MODE_SW_DISABLED;
432
433 kvm_for_each_vcpu(i, vcpu, kvm) {
434 if (!kvm_apic_present(vcpu))
435 continue;
436
437 r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
438 if (r) {
439 kvfree(new);
440 new = NULL;
441 if (r == -E2BIG) {
442 cond_resched();
443 goto retry;
444 }
445
446 goto out;
447 }
448
449 kvm_recalculate_logical_map(new, vcpu);
450 }
451out:
452 /*
453 * The optimized map is effectively KVM's internal version of APICv,
454 * and all unwanted aliasing that results in disabling the optimized
455 * map also applies to APICv.
456 */
457 if (!new)
458 kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
459 else
460 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
461
462 if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
463 kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
464 else
465 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
466
467 if (xapic_id_mismatch)
468 kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
469 else
470 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
471
472 old = rcu_dereference_protected(kvm->arch.apic_map,
473 lockdep_is_held(&kvm->arch.apic_map_lock));
474 rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing kvm->arch.apic_map_dirty.
	 * If another update came in while this one was in progress, leave
	 * the state DIRTY so the map is recalculated again.
	 */
479 atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
480 UPDATE_IN_PROGRESS, CLEAN);
481 mutex_unlock(&kvm->arch.apic_map_lock);
482
483 if (old)
484 call_rcu(&old->rcu, kvm_apic_map_free);
485
486 kvm_make_scan_ioapic_request(kvm);
487}
488
489static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
490{
491 bool enabled = val & APIC_SPIV_APIC_ENABLED;
492
493 kvm_lapic_set_reg(apic, APIC_SPIV, val);
494
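	/*
	 * Toggle the apic_sw_disabled static key to track whether any APIC
	 * is software-disabled.  The deferred decrement avoids thrashing the
	 * branch patching if the guest rapidly toggles its APIC on and off.
	 */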
495 if (enabled != apic->sw_enabled) {
496 apic->sw_enabled = enabled;
497 if (enabled)
498 static_branch_slow_dec_deferred(&apic_sw_disabled);
499 else
500 static_branch_inc(&apic_sw_disabled.key);
501
502 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
503 }
504
505 /* Check if there are APF page ready requests pending */
506 if (enabled) {
507 kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
508 kvm_xen_sw_enable_lapic(apic->vcpu);
509 }
510}
511
512static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
513{
514 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
515 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
516}
517
518static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
519{
520 kvm_lapic_set_reg(apic, APIC_LDR, id);
521 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
522}
523
524static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
525{
526 kvm_lapic_set_reg(apic, APIC_DFR, val);
527 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
528}
529
530static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
531{
532 u32 ldr = kvm_apic_calc_x2apic_ldr(id);
533
534 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
535
536 kvm_lapic_set_reg(apic, APIC_ID, id);
537 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
538 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
539}
540
541static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
542{
543 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
544}
545
546static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
547{
548 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
549}
550
551static inline int apic_lvtt_period(struct kvm_lapic *apic)
552{
553 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
554}
555
556static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
557{
558 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
559}
560
561static inline int apic_lvt_nmi_mode(u32 lvt_val)
562{
563 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
564}
565
566static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
567{
568 return apic->nr_lvt_entries > lvt_index;
569}
570
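/*
 * The CMCI LVT entry is the last architectural entry and exists only if the
 * vCPU supports CMCI, i.e. MCG_CMCI_P is set in IA32_MCG_CAP, so drop one
 * entry from the maximum when CMCI is unsupported.
 */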
571static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
572{
573 return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
574}
575
576void kvm_apic_set_version(struct kvm_vcpu *vcpu)
577{
578 struct kvm_lapic *apic = vcpu->arch.apic;
579 u32 v = 0;
580
581 if (!lapic_in_kernel(vcpu))
582 return;
583
584 v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
585
	/*
	 * KVM's in-kernel IOAPIC emulates the 82093AA, which doesn't have an
	 * EOI register.  Some buggy OSes (e.g. Windows with the Hyper-V role)
	 * disable EOI broadcast in the local APIC without first checking the
	 * IOAPIC version, in which case level-triggered interrupts are never
	 * EOIed in the IOAPIC.  Advertise directed EOI support only when the
	 * in-kernel IOAPIC is not in use.
	 */
593 if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
594 !ioapic_in_kernel(vcpu->kvm))
595 v |= APIC_LVR_DIRECTED_EOI;
596 kvm_lapic_set_reg(apic, APIC_LVR, v);
597}
598
599void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
600{
601 int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
602 struct kvm_lapic *apic = vcpu->arch.apic;
603 int i;
604
605 if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
606 return;
607
608 /* Initialize/mask any "new" LVT entries. */
609 for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
610 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
611
612 apic->nr_lvt_entries = nr_lvt_entries;
613
614 /* The number of LVT entries is reflected in the version register. */
615 kvm_apic_set_version(vcpu);
616}
617
618static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
619 [LVT_TIMER] = LVT_MASK, /* timer mode mask added at runtime */
620 [LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
621 [LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
622 [LVT_LINT0] = LINT_MASK,
623 [LVT_LINT1] = LINT_MASK,
624 [LVT_ERROR] = LVT_MASK,
625 [LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
626};
627
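/*
 * Scan the 256-bit vector bitmap as eight 32-bit registers, from the highest
 * register down; the highest set bit in the first non-zero register is the
 * highest pending vector.  Returns -1 if no bits are set.
 */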
628static int find_highest_vector(void *bitmap)
629{
630 int vec;
631 u32 *reg;
632
633 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
634 vec >= 0; vec -= APIC_VECTORS_PER_REG) {
635 reg = bitmap + REG_POS(vec);
636 if (*reg)
637 return __fls(*reg) + vec;
638 }
639
640 return -1;
641}
642
643static u8 count_vectors(void *bitmap)
644{
645 int vec;
646 u32 *reg;
647 u8 count = 0;
648
649 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
650 reg = bitmap + REG_POS(vec);
651 count += hweight32(*reg);
652 }
653
654 return count;
655}
656
657bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
658{
659 u32 i, vec;
660 u32 pir_val, irr_val, prev_irr_val;
661 int max_updated_irr;
662
663 max_updated_irr = -1;
664 *max_irr = -1;
665
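	/*
	 * Merge the PIR (Posted Interrupt Request) bitmap into the vIRR one
	 * 32-bit chunk at a time: atomically consume each PIR chunk with
	 * xchg(), then publish it into the IRR via cmpxchg() as the CPU may
	 * be modifying the IRR concurrently.  Track the highest newly-set
	 * vector so the caller can tell whether the merge produced a new
	 * highest-priority pending interrupt.
	 */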
666 for (i = vec = 0; i <= 7; i++, vec += 32) {
667 u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);
668
669 irr_val = *p_irr;
670 pir_val = READ_ONCE(pir[i]);
671
672 if (pir_val) {
673 pir_val = xchg(&pir[i], 0);
674
675 prev_irr_val = irr_val;
676 do {
677 irr_val = prev_irr_val | pir_val;
678 } while (prev_irr_val != irr_val &&
679 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));
680
681 if (prev_irr_val != irr_val)
682 max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
683 }
684 if (irr_val)
685 *max_irr = __fls(irr_val) + vec;
686 }
687
688 return ((max_updated_irr != -1) &&
689 (max_updated_irr == *max_irr));
690}
691EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
692
693bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
694{
695 struct kvm_lapic *apic = vcpu->arch.apic;
696 bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
697
698 if (unlikely(!apic->apicv_active && irr_updated))
699 apic->irr_pending = true;
700 return irr_updated;
701}
702EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
703
704static inline int apic_search_irr(struct kvm_lapic *apic)
705{
706 return find_highest_vector(apic->regs + APIC_IRR);
707}
708
709static inline int apic_find_highest_irr(struct kvm_lapic *apic)
710{
711 int result;
712
	/*
	 * Note that irr_pending is just a hint.  It is always true with
	 * virtual interrupt delivery enabled.
	 */
717 if (!apic->irr_pending)
718 return -1;
719
720 result = apic_search_irr(apic);
721 ASSERT(result == -1 || result >= 16);
722
723 return result;
724}
725
726static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
727{
728 if (unlikely(apic->apicv_active)) {
729 /* need to update RVI */
730 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
731 static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
732 apic_find_highest_irr(apic));
733 } else {
734 apic->irr_pending = false;
735 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
736 if (apic_search_irr(apic) != -1)
737 apic->irr_pending = true;
738 }
739}
740
741void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
742{
743 apic_clear_irr(vec, vcpu->arch.apic);
744}
745EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
746
747static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
748{
749 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
750 return;
751
752 /*
753 * With APIC virtualization enabled, all caching is disabled
754 * because the processor can modify ISR under the hood. Instead
755 * just set SVI.
756 */
757 if (unlikely(apic->apicv_active))
758 static_call_cond(kvm_x86_hwapic_isr_update)(vec);
759 else {
760 ++apic->isr_count;
761 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * An ISR (In-Service Register) bit is set when an interrupt
		 * is injected, and the injected vector is always the highest
		 * pending vector, thus the most recently set bit matches the
		 * highest bit in the ISR.
		 */
767 apic->highest_isr_cache = vec;
768 }
769}
770
771static inline int apic_find_highest_isr(struct kvm_lapic *apic)
772{
773 int result;
774
775 /*
776 * Note that isr_count is always 1, and highest_isr_cache
777 * is always -1, with APIC virtualization enabled.
778 */
779 if (!apic->isr_count)
780 return -1;
781 if (likely(apic->highest_isr_cache != -1))
782 return apic->highest_isr_cache;
783
784 result = find_highest_vector(apic->regs + APIC_ISR);
785 ASSERT(result == -1 || result >= 16);
786
787 return result;
788}
789
790static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
791{
792 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
793 return;
794
	/*
	 * We do get here with APIC virtualization enabled if the guest uses
	 * the Hyper-V APIC enlightenment.  In this case we may need to
	 * trigger a new interrupt delivery by writing the SVI field; on the
	 * other hand isr_count and highest_isr_cache are unused and must be
	 * left alone.
	 */
802 if (unlikely(apic->apicv_active))
803 static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
804 else {
805 --apic->isr_count;
806 BUG_ON(apic->isr_count < 0);
807 apic->highest_isr_cache = -1;
808 }
809}
810
811int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
812{
	/*
	 * This may race with the setting of IRR in __apic_accept_irq() and
	 * the value returned may be stale, but kvm_vcpu_kick() in
	 * __apic_accept_irq() will cause an immediate VM-Exit and the value
	 * will be recalculated on the next VM-Enter.
	 */
818 return apic_find_highest_irr(vcpu->arch.apic);
819}
820EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
821
822static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
823 int vector, int level, int trig_mode,
824 struct dest_map *dest_map);
825
826int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
827 struct dest_map *dest_map)
828{
829 struct kvm_lapic *apic = vcpu->arch.apic;
830
831 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
832 irq->level, irq->trig_mode, dest_map);
833}
834
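/*
 * Deliver the IPI described by @irq to every APIC ID whose bit is set in
 * @ipi_bitmap, where bit N corresponds to physical APIC ID @min + N.
 * Returns the number of successfully delivered interrupts.
 */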
835static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
836 struct kvm_lapic_irq *irq, u32 min)
837{
838 int i, count = 0;
839 struct kvm_vcpu *vcpu;
840
841 if (min > map->max_apic_id)
842 return 0;
843
844 for_each_set_bit(i, ipi_bitmap,
845 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
846 if (map->phys_map[min + i]) {
847 vcpu = map->phys_map[min + i]->vcpu;
848 count += kvm_apic_set_irq(vcpu, irq, NULL);
849 }
850 }
851
852 return count;
853}
854
855int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
856 unsigned long ipi_bitmap_high, u32 min,
857 unsigned long icr, int op_64_bit)
858{
859 struct kvm_apic_map *map;
860 struct kvm_lapic_irq irq = {0};
861 int cluster_size = op_64_bit ? 64 : 32;
862 int count;
863
864 if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
865 return -KVM_EINVAL;
866
867 irq.vector = icr & APIC_VECTOR_MASK;
868 irq.delivery_mode = icr & APIC_MODE_MASK;
869 irq.level = (icr & APIC_INT_ASSERT) != 0;
870 irq.trig_mode = icr & APIC_INT_LEVELTRIG;
871
872 rcu_read_lock();
873 map = rcu_dereference(kvm->arch.apic_map);
874
875 count = -EOPNOTSUPP;
876 if (likely(map)) {
877 count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
878 min += cluster_size;
879 count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
880 }
881
882 rcu_read_unlock();
883 return count;
884}
885
886static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
887{
888
889 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
890 sizeof(val));
891}
892
893static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
894{
895
896 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
897 sizeof(*val));
898}
899
900static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
901{
902 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
903}
904
905static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
906{
907 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
908 return;
909
910 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
911}
912
913static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
914{
915 u8 val;
916
917 if (pv_eoi_get_user(vcpu, &val) < 0)
918 return false;
919
920 val &= KVM_PV_EOI_ENABLED;
921
922 if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
923 return false;
924
	/*
	 * Clear the pending bit in any case: it will be set again on
	 * VM-Entry.  While this might not be ideal from a performance point
	 * of view, it ensures that PV EOI is enabled only when it's known to
	 * be safe.
	 */
930 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
931
932 return val;
933}
934
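/*
 * An interrupt is deliverable only if its priority class (bits 7:4 of the
 * vector) is strictly greater than the processor priority, e.g. vector 0x61
 * is blocked by a PPR of 0x6f but delivered under a PPR of 0x5f.
 */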
935static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
936{
937 int highest_irr;
938 if (kvm_x86_ops.sync_pir_to_irr)
939 highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
940 else
941 highest_irr = apic_find_highest_irr(apic);
942 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
943 return -1;
944 return highest_irr;
945}
946
947static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
948{
949 u32 tpr, isrv, ppr, old_ppr;
950 int isr;
951
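	/*
	 * PPR is the TPR if the TPR's priority class is at least that of the
	 * highest in-service vector, else it's the in-service class with the
	 * low nibble cleared, e.g. TPR=0x62/ISRV=0x51 => PPR=0x62, whereas
	 * TPR=0x30/ISRV=0x51 => PPR=0x50.
	 */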
952 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
953 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
954 isr = apic_find_highest_isr(apic);
955 isrv = (isr != -1) ? isr : 0;
956
957 if ((tpr & 0xf0) >= (isrv & 0xf0))
958 ppr = tpr & 0xff;
959 else
960 ppr = isrv & 0xf0;
961
962 *new_ppr = ppr;
963 if (old_ppr != ppr)
964 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
965
966 return ppr < old_ppr;
967}
968
969static void apic_update_ppr(struct kvm_lapic *apic)
970{
971 u32 ppr;
972
973 if (__apic_update_ppr(apic, &ppr) &&
974 apic_has_interrupt_for_ppr(apic, ppr) != -1)
975 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
976}
977
978void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
979{
980 apic_update_ppr(vcpu->arch.apic);
981}
982EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
983
984static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
985{
986 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
987 apic_update_ppr(apic);
988}
989
990static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
991{
992 return mda == (apic_x2apic_mode(apic) ?
993 X2APIC_BROADCAST : APIC_BROADCAST);
994}
995
996static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
997{
998 if (kvm_apic_broadcast(apic, mda))
999 return true;
1000
1001 /*
1002 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
1003 * were in x2APIC mode if the target APIC ID can't be encoded as an
1004 * xAPIC ID. This allows unique addressing of hotplugged vCPUs (which
1005 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
1006 * mode. Match the x2APIC ID if and only if the target APIC ID can't
1007 * be encoded in xAPIC to avoid spurious matches against a vCPU that
1008 * changed its (addressable) xAPIC ID (which is writable).
1009 */
1010 if (apic_x2apic_mode(apic) || mda > 0xff)
1011 return mda == kvm_x2apic_id(apic);
1012
1013 return mda == kvm_xapic_id(apic);
1014}
1015
1016static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
1017{
1018 u32 logical_id;
1019
1020 if (kvm_apic_broadcast(apic, mda))
1021 return true;
1022
1023 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
1024
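	/*
	 * In x2APIC mode the LDR is composed of a 16-bit cluster ID (bits
	 * 31:16) and a 16-bit logical ID bitmask (bits 15:0); the MDA
	 * matches if the clusters are identical and the bitmasks intersect.
	 */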
1025 if (apic_x2apic_mode(apic))
1026 return ((logical_id >> 16) == (mda >> 16))
1027 && (logical_id & mda & 0xffff) != 0;
1028
1029 logical_id = GET_APIC_LOGICAL_ID(logical_id);
1030
1031 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
1032 case APIC_DFR_FLAT:
1033 return (logical_id & mda) != 0;
1034 case APIC_DFR_CLUSTER:
1035 return ((logical_id >> 4) == (mda >> 4))
1036 && (logical_id & mda & 0xf) != 0;
1037 default:
1038 return false;
1039 }
1040}
1041
/*
 * The KVM local APIC implementation has two quirks:
1043 *
1044 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
1045 * in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
1046 * KVM doesn't do that aliasing.
1047 *
1048 * - in-kernel IOAPIC messages have to be delivered directly to
1049 * x2APIC, because the kernel does not support interrupt remapping.
1050 * In order to support broadcast without interrupt remapping, x2APIC
1051 * rewrites the destination of non-IPI messages from APIC_BROADCAST
1052 * to X2APIC_BROADCAST.
1053 *
1054 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
1055 * important when userspace wants to use x2APIC-format MSIs, because
1056 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
1057 */
1058static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
1059 struct kvm_lapic *source, struct kvm_lapic *target)
1060{
1061 bool ipi = source != NULL;
1062
1063 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
1064 !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
1065 return X2APIC_BROADCAST;
1066
1067 return dest_id;
1068}
1069
1070bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
1071 int shorthand, unsigned int dest, int dest_mode)
1072{
1073 struct kvm_lapic *target = vcpu->arch.apic;
1074 u32 mda = kvm_apic_mda(vcpu, dest, source, target);
1075
1076 ASSERT(target);
1077 switch (shorthand) {
1078 case APIC_DEST_NOSHORT:
1079 if (dest_mode == APIC_DEST_PHYSICAL)
1080 return kvm_apic_match_physical_addr(target, mda);
1081 else
1082 return kvm_apic_match_logical_addr(target, mda);
1083 case APIC_DEST_SELF:
1084 return target == source;
1085 case APIC_DEST_ALLINC:
1086 return true;
1087 case APIC_DEST_ALLBUT:
1088 return target != source;
1089 default:
1090 return false;
1091 }
1092}
1093EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
1094
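/*
 * Return the index of the (vector % dest_vcpus)'th set bit in @bitmap, i.e.
 * deterministically hash a vector onto one of the candidate destinations.
 */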
1095int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
1096 const unsigned long *bitmap, u32 bitmap_size)
1097{
1098 u32 mod;
1099 int i, idx = -1;
1100
1101 mod = vector % dest_vcpus;
1102
1103 for (i = 0; i <= mod; i++) {
1104 idx = find_next_bit(bitmap, bitmap_size, idx + 1);
1105 BUG_ON(idx == bitmap_size);
1106 }
1107
1108 return idx;
1109}
1110
1111static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
1112{
1113 if (!kvm->arch.disabled_lapic_found) {
1114 kvm->arch.disabled_lapic_found = true;
1115 pr_info("Disabled LAPIC found during irq injection\n");
1116 }
1117}
1118
1119static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
1120 struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
1121{
1122 if (kvm->arch.x2apic_broadcast_quirk_disabled) {
1123 if ((irq->dest_id == APIC_BROADCAST &&
1124 map->logical_mode != KVM_APIC_MODE_X2APIC))
1125 return true;
1126 if (irq->dest_id == X2APIC_BROADCAST)
1127 return true;
1128 } else {
1129 bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
1130 if (irq->dest_id == (x2apic_ipi ?
1131 X2APIC_BROADCAST : APIC_BROADCAST))
1132 return true;
1133 }
1134
1135 return false;
1136}
1137
/*
 * Return true if the interrupt can be handled using *bitmap as an index mask
 * into the *dst array of valid destinations, false if
 * kvm_apic_map_get_dest_lapic() did nothing useful.
 *
 * Note, there may be zero kvm_lapic destinations when true is returned, in
 * which case the interrupt should be dropped; *bitmap will be zero and *dst
 * undefined.
 */
1145static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
1146 struct kvm_lapic **src, struct kvm_lapic_irq *irq,
1147 struct kvm_apic_map *map, struct kvm_lapic ***dst,
1148 unsigned long *bitmap)
1149{
1150 int i, lowest;
1151
1152 if (irq->shorthand == APIC_DEST_SELF && src) {
1153 *dst = src;
1154 *bitmap = 1;
1155 return true;
1156 } else if (irq->shorthand)
1157 return false;
1158
1159 if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
1160 return false;
1161
1162 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
1163 if (irq->dest_id > map->max_apic_id) {
1164 *bitmap = 0;
1165 } else {
1166 u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
1167 *dst = &map->phys_map[dest_id];
1168 *bitmap = 1;
1169 }
1170 return true;
1171 }
1172
1173 *bitmap = 0;
1174 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
1175 (u16 *)bitmap))
1176 return false;
1177
1178 if (!kvm_lowest_prio_delivery(irq))
1179 return true;
1180
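	/*
	 * Pick a single destination for the lowest-priority interrupt,
	 * either by scanning for the vCPU with the lowest arbitration
	 * priority, or, if vector hashing is enabled, by deterministically
	 * hashing the vector onto the set of candidate destinations.
	 */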
1181 if (!kvm_vector_hashing_enabled()) {
1182 lowest = -1;
1183 for_each_set_bit(i, bitmap, 16) {
1184 if (!(*dst)[i])
1185 continue;
1186 if (lowest < 0)
1187 lowest = i;
1188 else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
1189 (*dst)[lowest]->vcpu) < 0)
1190 lowest = i;
1191 }
1192 } else {
1193 if (!*bitmap)
1194 return true;
1195
1196 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
1197 bitmap, 16);
1198
1199 if (!(*dst)[lowest]) {
1200 kvm_apic_disabled_lapic_found(kvm);
1201 *bitmap = 0;
1202 return true;
1203 }
1204 }
1205
1206 *bitmap = (lowest >= 0) ? 1 << lowest : 0;
1207
1208 return true;
1209}
1210
1211bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
1212 struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
1213{
1214 struct kvm_apic_map *map;
1215 unsigned long bitmap;
1216 struct kvm_lapic **dst = NULL;
1217 int i;
1218 bool ret;
1219
1220 *r = -1;
1221
1222 if (irq->shorthand == APIC_DEST_SELF) {
1223 if (KVM_BUG_ON(!src, kvm)) {
1224 *r = 0;
1225 return true;
1226 }
1227 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
1228 return true;
1229 }
1230
1231 rcu_read_lock();
1232 map = rcu_dereference(kvm->arch.apic_map);
1233
1234 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1235 if (ret) {
1236 *r = 0;
1237 for_each_set_bit(i, &bitmap, 16) {
1238 if (!dst[i])
1239 continue;
1240 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1241 }
1242 }
1243
1244 rcu_read_unlock();
1245 return ret;
1246}
1247
1248/*
 * This routine tries to handle interrupts in posted mode; here is how it
 * deals with the different cases:
1251 * - For single-destination interrupts, handle it in posted mode
1252 * - Else if vector hashing is enabled and it is a lowest-priority
1253 * interrupt, handle it in posted mode and use the following mechanism
1254 * to find the destination vCPU.
1255 * 1. For lowest-priority interrupts, store all the possible
1256 * destination vCPUs in an array.
1257 * 2. Use "guest vector % max number of destination vCPUs" to find
1258 * the right destination vCPU in the array for the lowest-priority
1259 * interrupt.
1260 * - Otherwise, use remapped mode to inject the interrupt.
1261 */
1262bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
1263 struct kvm_vcpu **dest_vcpu)
1264{
1265 struct kvm_apic_map *map;
1266 unsigned long bitmap;
1267 struct kvm_lapic **dst = NULL;
1268 bool ret = false;
1269
1270 if (irq->shorthand)
1271 return false;
1272
1273 rcu_read_lock();
1274 map = rcu_dereference(kvm->arch.apic_map);
1275
1276 if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1277 hweight16(bitmap) == 1) {
1278 unsigned long i = find_first_bit(&bitmap, 16);
1279
1280 if (dst[i]) {
1281 *dest_vcpu = dst[i]->vcpu;
1282 ret = true;
1283 }
1284 }
1285
1286 rcu_read_unlock();
1287 return ret;
1288}
1289
1290/*
1291 * Add a pending IRQ into lapic.
1292 * Return 1 if successfully added and 0 if discarded.
1293 */
1294static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1295 int vector, int level, int trig_mode,
1296 struct dest_map *dest_map)
1297{
1298 int result = 0;
1299 struct kvm_vcpu *vcpu = apic->vcpu;
1300
1301 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1302 trig_mode, vector);
1303 switch (delivery_mode) {
1304 case APIC_DM_LOWEST:
1305 vcpu->arch.apic_arb_prio++;
1306 fallthrough;
1307 case APIC_DM_FIXED:
1308 if (unlikely(trig_mode && !level))
1309 break;
1310
1311 /* FIXME add logic for vcpu on reset */
1312 if (unlikely(!apic_enabled(apic)))
1313 break;
1314
1315 result = 1;
1316
1317 if (dest_map) {
1318 __set_bit(vcpu->vcpu_id, dest_map->map);
1319 dest_map->vectors[vcpu->vcpu_id] = vector;
1320 }
1321
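		/*
		 * Update the TMR to reflect the new interrupt's trigger mode;
		 * a set TMR bit marks the vector as level-triggered so that
		 * the eventual EOI is forwarded to the IOAPIC.
		 */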
1322 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1323 if (trig_mode)
1324 kvm_lapic_set_vector(vector,
1325 apic->regs + APIC_TMR);
1326 else
1327 kvm_lapic_clear_vector(vector,
1328 apic->regs + APIC_TMR);
1329 }
1330
1331 static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
1332 trig_mode, vector);
1333 break;
1334
1335 case APIC_DM_REMRD:
1336 result = 1;
1337 vcpu->arch.pv.pv_unhalted = 1;
1338 kvm_make_request(KVM_REQ_EVENT, vcpu);
1339 kvm_vcpu_kick(vcpu);
1340 break;
1341
1342 case APIC_DM_SMI:
1343 if (!kvm_inject_smi(vcpu)) {
1344 kvm_vcpu_kick(vcpu);
1345 result = 1;
1346 }
1347 break;
1348
1349 case APIC_DM_NMI:
1350 result = 1;
1351 kvm_inject_nmi(vcpu);
1352 kvm_vcpu_kick(vcpu);
1353 break;
1354
1355 case APIC_DM_INIT:
1356 if (!trig_mode || level) {
1357 result = 1;
1358 /* assumes that there are only KVM_APIC_INIT/SIPI */
1359 apic->pending_events = (1UL << KVM_APIC_INIT);
1360 kvm_make_request(KVM_REQ_EVENT, vcpu);
1361 kvm_vcpu_kick(vcpu);
1362 }
1363 break;
1364
1365 case APIC_DM_STARTUP:
1366 result = 1;
1367 apic->sipi_vector = vector;
1368 /* make sure sipi_vector is visible for the receiver */
1369 smp_wmb();
1370 set_bit(KVM_APIC_SIPI, &apic->pending_events);
1371 kvm_make_request(KVM_REQ_EVENT, vcpu);
1372 kvm_vcpu_kick(vcpu);
1373 break;
1374
1375 case APIC_DM_EXTINT:
1376 /*
1377 * Should only be called by kvm_apic_local_deliver() with LVT0,
1378 * before NMI watchdog was enabled. Already handled by
1379 * kvm_apic_accept_pic_intr().
1380 */
1381 break;
1382
	default:
		pr_err("TODO: unsupported delivery mode %x\n", delivery_mode);
		break;
1387 }
1388 return result;
1389}
1390
/*
 * This routine identifies the mask of destination vCPUs meant to receive an
 * IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to look up
 * the destination vCPU array and set the bitmap, or it falls back to
 * iterating over all vCPUs and matching each against the destination.
 */
1397void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
1398 unsigned long *vcpu_bitmap)
1399{
1400 struct kvm_lapic **dest_vcpu = NULL;
1401 struct kvm_lapic *src = NULL;
1402 struct kvm_apic_map *map;
1403 struct kvm_vcpu *vcpu;
1404 unsigned long bitmap, i;
1405 int vcpu_idx;
1406 bool ret;
1407
1408 rcu_read_lock();
1409 map = rcu_dereference(kvm->arch.apic_map);
1410
1411 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1412 &bitmap);
1413 if (ret) {
1414 for_each_set_bit(i, &bitmap, 16) {
1415 if (!dest_vcpu[i])
1416 continue;
1417 vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1418 __set_bit(vcpu_idx, vcpu_bitmap);
1419 }
1420 } else {
1421 kvm_for_each_vcpu(i, vcpu, kvm) {
1422 if (!kvm_apic_present(vcpu))
1423 continue;
1424 if (!kvm_apic_match_dest(vcpu, NULL,
1425 irq->shorthand,
1426 irq->dest_id,
1427 irq->dest_mode))
1428 continue;
1429 __set_bit(i, vcpu_bitmap);
1430 }
1431 }
1432 rcu_read_unlock();
1433}
1434
1435int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1436{
1437 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1438}
1439
1440static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1441{
1442 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1443}
1444
1445static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1446{
1447 int trigger_mode;
1448
	/* EOI the IOAPIC only if the IOAPIC actually handles the vector. */
1450 if (!kvm_ioapic_handles_vector(apic, vector))
1451 return;
1452
1453 /* Request a KVM exit to inform the userspace IOAPIC. */
1454 if (irqchip_split(apic->vcpu->kvm)) {
1455 apic->vcpu->arch.pending_ioapic_eoi = vector;
1456 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1457 return;
1458 }
1459
1460 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1461 trigger_mode = IOAPIC_LEVEL_TRIG;
1462 else
1463 trigger_mode = IOAPIC_EDGE_TRIG;
1464
1465 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1466}
1467
1468static int apic_set_eoi(struct kvm_lapic *apic)
1469{
1470 int vector = apic_find_highest_isr(apic);
1471
1472 trace_kvm_eoi(apic, vector);
1473
	/*
	 * Not every EOI write has a corresponding ISR bit set; one example is
	 * when the guest kernel checks the timer in setup_IO_APIC().
	 */
1478 if (vector == -1)
1479 return vector;
1480
1481 apic_clear_isr(vector, apic);
1482 apic_update_ppr(apic);
1483
1484 if (kvm_hv_synic_has_vector(apic->vcpu, vector))
1485 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1486
1487 kvm_ioapic_send_eoi(apic, vector);
1488 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1489 return vector;
1490}
1491
/*
 * This interface assumes a trap-like exit, i.e. the desired side effects,
 * including the vISR and vPPR updates, have already completed.
 */
1496void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1497{
1498 struct kvm_lapic *apic = vcpu->arch.apic;
1499
1500 trace_kvm_eoi(apic, vector);
1501
1502 kvm_ioapic_send_eoi(apic, vector);
1503 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1504}
1505EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1506
1507void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1508{
1509 struct kvm_lapic_irq irq;
1510
1511 /* KVM has no delay and should always clear the BUSY/PENDING flag. */
1512 WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
1513
1514 irq.vector = icr_low & APIC_VECTOR_MASK;
1515 irq.delivery_mode = icr_low & APIC_MODE_MASK;
1516 irq.dest_mode = icr_low & APIC_DEST_MASK;
1517 irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1518 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1519 irq.shorthand = icr_low & APIC_SHORT_MASK;
1520 irq.msi_redir_hint = false;
1521 if (apic_x2apic_mode(apic))
1522 irq.dest_id = icr_high;
1523 else
1524 irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
1525
1526 trace_kvm_apic_ipi(icr_low, irq.dest_id);
1527
1528 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1529}
1530EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
1531
1532static u32 apic_get_tmcct(struct kvm_lapic *apic)
1533{
1534 ktime_t remaining, now;
1535 s64 ns;
1536
1537 ASSERT(apic != NULL);
1538
1539 /* if initial count is 0, current count should also be 0 */
1540 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1541 apic->lapic_timer.period == 0)
1542 return 0;
1543
1544 now = ktime_get();
1545 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1546 if (ktime_to_ns(remaining) < 0)
1547 remaining = 0;
1548
1549 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1550 return div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->divide_count));
1551}
1552
1553static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1554{
1555 struct kvm_vcpu *vcpu = apic->vcpu;
1556 struct kvm_run *run = vcpu->run;
1557
1558 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1559 run->tpr_access.rip = kvm_rip_read(vcpu);
1560 run->tpr_access.is_write = write;
1561}
1562
1563static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1564{
1565 if (apic->vcpu->arch.tpr_access_reporting)
1566 __report_tpr_access(apic, write);
1567}
1568
1569static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1570{
1571 u32 val = 0;
1572
1573 if (offset >= LAPIC_MMIO_LENGTH)
1574 return 0;
1575
1576 switch (offset) {
1577 case APIC_ARBPRI:
1578 break;
1579
1580 case APIC_TMCCT: /* Timer CCR */
1581 if (apic_lvtt_tscdeadline(apic))
1582 return 0;
1583
1584 val = apic_get_tmcct(apic);
1585 break;
1586 case APIC_PROCPRI:
1587 apic_update_ppr(apic);
1588 val = kvm_lapic_get_reg(apic, offset);
1589 break;
1590 case APIC_TASKPRI:
1591 report_tpr_access(apic, false);
1592 fallthrough;
1593 default:
1594 val = kvm_lapic_get_reg(apic, offset);
1595 break;
1596 }
1597
1598 return val;
1599}
1600
1601static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1602{
1603 return container_of(dev, struct kvm_lapic, dev);
1604}
1605
1606#define APIC_REG_MASK(reg) (1ull << ((reg) >> 4))
1607#define APIC_REGS_MASK(first, count) \
1608 (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
1609
1610u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
1611{
1612 /* Leave bits '0' for reserved and write-only registers. */
1613 u64 valid_reg_mask =
1614 APIC_REG_MASK(APIC_ID) |
1615 APIC_REG_MASK(APIC_LVR) |
1616 APIC_REG_MASK(APIC_TASKPRI) |
1617 APIC_REG_MASK(APIC_PROCPRI) |
1618 APIC_REG_MASK(APIC_LDR) |
1619 APIC_REG_MASK(APIC_SPIV) |
1620 APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1621 APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1622 APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1623 APIC_REG_MASK(APIC_ESR) |
1624 APIC_REG_MASK(APIC_ICR) |
1625 APIC_REG_MASK(APIC_LVTT) |
1626 APIC_REG_MASK(APIC_LVTTHMR) |
1627 APIC_REG_MASK(APIC_LVTPC) |
1628 APIC_REG_MASK(APIC_LVT0) |
1629 APIC_REG_MASK(APIC_LVT1) |
1630 APIC_REG_MASK(APIC_LVTERR) |
1631 APIC_REG_MASK(APIC_TMICT) |
1632 APIC_REG_MASK(APIC_TMCCT) |
1633 APIC_REG_MASK(APIC_TDCR);
1634
1635 if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1636 valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);
1637
1638 /* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
1639 if (!apic_x2apic_mode(apic))
1640 valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
1641 APIC_REG_MASK(APIC_DFR) |
1642 APIC_REG_MASK(APIC_ICR2);
1643
1644 return valid_reg_mask;
1645}
1646EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
1647
1648static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1649 void *data)
1650{
1651 unsigned char alignment = offset & 0xf;
1652 u32 result;
1653
1654 /*
1655 * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
1656 * x2APIC and needs to be manually handled by the caller.
1657 */
1658 WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);
1659
1660 if (alignment + len > 4)
1661 return 1;
1662
1663 if (offset > 0x3f0 ||
1664 !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
1665 return 1;
1666
1667 result = __apic_read(apic, offset & ~0xf);
1668
1669 trace_kvm_apic_read(offset, result);
1670
1671 switch (len) {
1672 case 1:
1673 case 2:
1674 case 4:
1675 memcpy(data, (char *)&result + alignment, len);
1676 break;
	default:
		pr_err("Local APIC read with len = %x, should be 1, 2, or 4 instead\n",
		       len);
		break;
1681 }
1682 return 0;
1683}
1684
1685static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1686{
1687 return addr >= apic->base_address &&
1688 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1689}
1690
1691static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1692 gpa_t address, int len, void *data)
1693{
1694 struct kvm_lapic *apic = to_lapic(this);
1695 u32 offset = address - apic->base_address;
1696
1697 if (!apic_mmio_in_range(apic, address))
1698 return -EOPNOTSUPP;
1699
1700 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1701 if (!kvm_check_has_quirk(vcpu->kvm,
1702 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1703 return -EOPNOTSUPP;
1704
1705 memset(data, 0xff, len);
1706 return 0;
1707 }
1708
1709 kvm_lapic_reg_read(apic, offset, len, data);
1710
1711 return 0;
1712}
1713
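/*
 * The timer divisor is encoded in bits 0, 1 and 3 of the TDCR, with
 * divide_count = 2^((encoded + 1) & 7), e.g. TDCR=0x0 yields divide-by-2
 * and TDCR=0xb (encoded value 7) wraps to divide-by-1.
 */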
1714static void update_divide_count(struct kvm_lapic *apic)
1715{
1716 u32 tmp1, tmp2, tdcr;
1717
1718 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1719 tmp1 = tdcr & 0xf;
1720 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1721 apic->divide_count = 0x1 << (tmp2 & 0x7);
1722}
1723
1724static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1725{
	/*
	 * Do not allow the guest to program periodic timers with a small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
1731 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1732 s64 min_period = min_timer_period_us * 1000LL;
1733
1734 if (apic->lapic_timer.period < min_period) {
1735 pr_info_ratelimited(
1736 "vcpu %i: requested %lld ns "
1737 "lapic timer period limited to %lld ns\n",
1738 apic->vcpu->vcpu_id,
1739 apic->lapic_timer.period, min_period);
1740 apic->lapic_timer.period = min_period;
1741 }
1742 }
1743}
1744
1745static void cancel_hv_timer(struct kvm_lapic *apic);
1746
1747static void cancel_apic_timer(struct kvm_lapic *apic)
1748{
1749 hrtimer_cancel(&apic->lapic_timer.timer);
1750 preempt_disable();
1751 if (apic->lapic_timer.hv_timer_in_use)
1752 cancel_hv_timer(apic);
1753 preempt_enable();
1754 atomic_set(&apic->lapic_timer.pending, 0);
1755}
1756
1757static void apic_update_lvtt(struct kvm_lapic *apic)
1758{
1759 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1760 apic->lapic_timer.timer_mode_mask;
1761
1762 if (apic->lapic_timer.timer_mode != timer_mode) {
1763 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1764 APIC_LVT_TIMER_TSCDEADLINE)) {
1765 cancel_apic_timer(apic);
1766 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1767 apic->lapic_timer.period = 0;
1768 apic->lapic_timer.tscdeadline = 0;
1769 }
1770 apic->lapic_timer.timer_mode = timer_mode;
1771 limit_periodic_timer_frequency(apic);
1772 }
1773}
1774
/*
 * With APICv, this test can cause a busy wait while the guest is running a
 * higher-priority task, as the timer vector remains pending in the vIRR
 * until the guest is ready to take the interrupt.
 */
1779
1780static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1781{
1782 struct kvm_lapic *apic = vcpu->arch.apic;
1783 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1784
1785 if (kvm_apic_hw_enabled(apic)) {
1786 int vec = reg & APIC_VECTOR_MASK;
1787 void *bitmap = apic->regs + APIC_ISR;
1788
1789 if (apic->apicv_active)
1790 bitmap = apic->regs + APIC_IRR;
1791
1792 if (apic_test_vector(vec, bitmap))
1793 return true;
1794 }
1795 return false;
1796}
1797
1798static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1799{
1800 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1801
1802 /*
1803 * If the guest TSC is running at a different ratio than the host, then
1804 * convert the delay to nanoseconds to achieve an accurate delay. Note
1805 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1806 * always for VMX enabled hardware.
1807 */
1808 if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1809 __delay(min(guest_cycles,
1810 nsec_to_cycles(vcpu, timer_advance_ns)));
1811 } else {
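		/* cycles * 1e6 / tsc_khz == cycles * 1e9 / tsc_hz, i.e. ns. */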
1812 u64 delay_ns = guest_cycles * 1000000ULL;
1813 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1814 ndelay(min_t(u32, delay_ns, timer_advance_ns));
1815 }
1816}
1817
1818static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1819 s64 advance_expire_delta)
1820{
1821 struct kvm_lapic *apic = vcpu->arch.apic;
1822 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1823 u64 ns;
1824
1825 /* Do not adjust for tiny fluctuations or large random spikes. */
1826 if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1827 abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1828 return;
1829
1830 /* too early */
1831 if (advance_expire_delta < 0) {
1832 ns = -advance_expire_delta * 1000000ULL;
1833 do_div(ns, vcpu->arch.virtual_tsc_khz);
1834 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1835 } else {
1836 /* too late */
1837 ns = advance_expire_delta * 1000000ULL;
1838 do_div(ns, vcpu->arch.virtual_tsc_khz);
1839 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1840 }
1841
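	/*
	 * If the adjustment overshoots the sane maximum, fall back to the
	 * initial value instead of clamping, as a result that far out of
	 * range suggests the tracking itself has gone haywire.
	 */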
1842 if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1843 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1844 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1845}
1846
1847static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1848{
1849 struct kvm_lapic *apic = vcpu->arch.apic;
1850 u64 guest_tsc, tsc_deadline;
1851
1852 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1853 apic->lapic_timer.expired_tscdeadline = 0;
1854 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1855 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1856
	if (lapic_timer_advance) {
1858 adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1859 /*
1860 * If the timer fired early, reread the TSC to account for the
1861 * overhead of the above adjustment to avoid waiting longer
1862 * than is necessary.
1863 */
1864 if (guest_tsc < tsc_deadline)
1865 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1866 }
1867
1868 if (guest_tsc < tsc_deadline)
1869 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1870}
1871
1872void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1873{
1874 if (lapic_in_kernel(vcpu) &&
1875 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1876 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1877 lapic_timer_int_injected(vcpu))
1878 __kvm_wait_lapic_expire(vcpu);
1879}
1880EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1881
1882static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1883{
1884 struct kvm_timer *ktimer = &apic->lapic_timer;
1885
1886 kvm_apic_local_deliver(apic, APIC_LVTT);
1887 if (apic_lvtt_tscdeadline(apic)) {
1888 ktimer->tscdeadline = 0;
1889 } else if (apic_lvtt_oneshot(apic)) {
1890 ktimer->tscdeadline = 0;
1891 ktimer->target_expiration = 0;
1892 }
1893}
1894
1895static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1896{
1897 struct kvm_vcpu *vcpu = apic->vcpu;
1898 struct kvm_timer *ktimer = &apic->lapic_timer;
1899
1900 if (atomic_read(&apic->lapic_timer.pending))
1901 return;
1902
1903 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1904 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1905
1906 if (!from_timer_fn && apic->apicv_active) {
1907 WARN_ON(kvm_get_running_vcpu() != vcpu);
1908 kvm_apic_inject_pending_timer_irqs(apic);
1909 return;
1910 }
1911
1912 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1913 /*
1914 * Ensure the guest's timer has truly expired before posting an
1915 * interrupt. Open code the relevant checks to avoid querying
1916 * lapic_timer_int_injected(), which will be false since the
1917 * interrupt isn't yet injected. Waiting until after injecting
1918 * is not an option since that won't help a posted interrupt.
1919 */
1920 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1921 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1922 __kvm_wait_lapic_expire(vcpu);
1923 kvm_apic_inject_pending_timer_irqs(apic);
1924 return;
1925 }
1926
1927 atomic_inc(&apic->lapic_timer.pending);
1928 kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1929 if (from_timer_fn)
1930 kvm_vcpu_kick(vcpu);
1931}
1932
1933static void start_sw_tscdeadline(struct kvm_lapic *apic)
1934{
1935 struct kvm_timer *ktimer = &apic->lapic_timer;
1936 u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1937 u64 ns = 0;
1938 ktime_t expire;
1939 struct kvm_vcpu *vcpu = apic->vcpu;
1940 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1941 unsigned long flags;
1942 ktime_t now;
1943
1944 if (unlikely(!tscdeadline || !this_tsc_khz))
1945 return;
1946
1947 local_irq_save(flags);
1948
1949 now = ktime_get();
1950 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1951
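	/*
	 * Convert the remaining guest TSC cycles to nanoseconds and program
	 * the hrtimer to fire timer_advance_ns early so that the interrupt
	 * is injected just as the deadline arrives in the guest.
	 */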
1952 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1953 do_div(ns, this_tsc_khz);
1954
1955 if (likely(tscdeadline > guest_tsc) &&
1956 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1957 expire = ktime_add_ns(now, ns);
1958 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1959 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1960 } else
1961 apic_timer_expired(apic, false);
1962
1963 local_irq_restore(flags);
1964}
1965
1966static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1967{
1968 return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1969}
1970
1971static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1972{
1973 ktime_t now, remaining;
1974 u64 ns_remaining_old, ns_remaining_new;
1975
1976 apic->lapic_timer.period =
1977 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1978 limit_periodic_timer_frequency(apic);
1979
1980 now = ktime_get();
1981 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1982 if (ktime_to_ns(remaining) < 0)
1983 remaining = 0;
1984
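	/*
	 * Scale the remaining time by the ratio of the new divisor to the
	 * old one, i.e. preserve the number of remaining timer ticks across
	 * the divisor change.
	 */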
1985 ns_remaining_old = ktime_to_ns(remaining);
1986 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1987 apic->divide_count, old_divisor);
1988
1989 apic->lapic_timer.tscdeadline +=
1990 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1991 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1992 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1993}
1994
1995static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1996{
1997 ktime_t now;
1998 u64 tscl = rdtsc();
1999 s64 deadline;
2000
2001 now = ktime_get();
2002 apic->lapic_timer.period =
2003 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2004
2005 if (!apic->lapic_timer.period) {
2006 apic->lapic_timer.tscdeadline = 0;
2007 return false;
2008 }
2009
2010 limit_periodic_timer_frequency(apic);
2011 deadline = apic->lapic_timer.period;
2012
2013 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2014 if (unlikely(count_reg != APIC_TMICT)) {
2015 deadline = tmict_to_ns(apic,
2016 kvm_lapic_get_reg(apic, count_reg));
2017 if (unlikely(deadline <= 0)) {
2018 if (apic_lvtt_period(apic))
2019 deadline = apic->lapic_timer.period;
2020 else
2021 deadline = 0;
2022 }
2023 else if (unlikely(deadline > apic->lapic_timer.period)) {
2024 pr_info_ratelimited(
2025 "vcpu %i: requested lapic timer restore with "
2026 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
2027 "Using initial count to start timer.\n",
2028 apic->vcpu->vcpu_id,
2029 count_reg,
2030 kvm_lapic_get_reg(apic, count_reg),
2031 deadline, apic->lapic_timer.period);
2032 kvm_lapic_set_reg(apic, count_reg, 0);
2033 deadline = apic->lapic_timer.period;
2034 }
2035 }
2036 }
2037
2038 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2039 nsec_to_cycles(apic->vcpu, deadline);
2040 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2041
2042 return true;
2043}
2044
2045static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2046{
2047 ktime_t now = ktime_get();
2048 u64 tscl = rdtsc();
2049 ktime_t delta;
2050
2051 /*
2052 * Synchronize both deadlines to the same time source or
2053 * differences in the periods (caused by differences in the
2054 * underlying clocks or numerical approximation errors) will
2055 * cause the two to drift apart over time as the errors
2056 * accumulate.
2057 */
2058 apic->lapic_timer.target_expiration =
2059 ktime_add_ns(apic->lapic_timer.target_expiration,
2060 apic->lapic_timer.period);
2061 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2062 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2063 nsec_to_cycles(apic->vcpu, delta);
2064}
2065
2066static void start_sw_period(struct kvm_lapic *apic)
2067{
2068 if (!apic->lapic_timer.period)
2069 return;
2070
2071 if (ktime_after(ktime_get(),
2072 apic->lapic_timer.target_expiration)) {
2073 apic_timer_expired(apic, false);
2074
2075 if (apic_lvtt_oneshot(apic))
2076 return;
2077
2078 advance_periodic_target_expiration(apic);
2079 }
2080
2081 hrtimer_start(&apic->lapic_timer.timer,
2082 apic->lapic_timer.target_expiration,
2083 HRTIMER_MODE_ABS_HARD);
2084}
2085
2086bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
2087{
2088 if (!lapic_in_kernel(vcpu))
2089 return false;
2090
2091 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2092}
2093
2094static void cancel_hv_timer(struct kvm_lapic *apic)
2095{
2096 WARN_ON(preemptible());
2097 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2098 static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
2099 apic->lapic_timer.hv_timer_in_use = false;
2100}
2101
2102static bool start_hv_timer(struct kvm_lapic *apic)
2103{
2104 struct kvm_timer *ktimer = &apic->lapic_timer;
2105 struct kvm_vcpu *vcpu = apic->vcpu;
2106 bool expired;
2107
2108 WARN_ON(preemptible());
2109 if (!kvm_can_use_hv_timer(vcpu))
2110 return false;
2111
2112 if (!ktimer->tscdeadline)
2113 return false;
2114
2115 if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
2116 return false;
2117
2118 ktimer->hv_timer_in_use = true;
2119 hrtimer_cancel(&ktimer->timer);
2120
2121 /*
2122 * To simplify handling the periodic timer, leave the hv timer running
2123 * even if the deadline timer has expired, i.e. rely on the resulting
2124 * VM-Exit to recompute the periodic timer's target expiration.
2125 */
2126 if (!apic_lvtt_period(apic)) {
2127 /*
2128 * Cancel the hv timer if the sw timer fired while the hv timer
2129 * was being programmed, or if the hv timer itself expired.
2130 */
2131 if (atomic_read(&ktimer->pending)) {
2132 cancel_hv_timer(apic);
2133 } else if (expired) {
2134 apic_timer_expired(apic, false);
2135 cancel_hv_timer(apic);
2136 }
2137 }
2138
2139 trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2140
2141 return true;
2142}
2143
2144static void start_sw_timer(struct kvm_lapic *apic)
2145{
2146 struct kvm_timer *ktimer = &apic->lapic_timer;
2147
2148 WARN_ON(preemptible());
2149 if (apic->lapic_timer.hv_timer_in_use)
2150 cancel_hv_timer(apic);
2151 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2152 return;
2153
2154 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2155 start_sw_period(apic);
2156 else if (apic_lvtt_tscdeadline(apic))
2157 start_sw_tscdeadline(apic);
2158 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2159}
2160
2161static void restart_apic_timer(struct kvm_lapic *apic)
2162{
2163 preempt_disable();
2164
2165 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2166 goto out;
2167
2168 if (!start_hv_timer(apic))
2169 start_sw_timer(apic);
2170out:
2171 preempt_enable();
2172}
2173
2174void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
2175{
2176 struct kvm_lapic *apic = vcpu->arch.apic;
2177
2178 preempt_disable();
	/* If the preempt notifier has already run, it also called apic_timer_expired(). */
2180 if (!apic->lapic_timer.hv_timer_in_use)
2181 goto out;
2182 WARN_ON(kvm_vcpu_is_blocking(vcpu));
2183 apic_timer_expired(apic, false);
2184 cancel_hv_timer(apic);
2185
2186 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2187 advance_periodic_target_expiration(apic);
2188 restart_apic_timer(apic);
2189 }
2190out:
2191 preempt_enable();
2192}
2193EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2194
2195void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2196{
2197 restart_apic_timer(vcpu->arch.apic);
2198}
2199
2200void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2201{
2202 struct kvm_lapic *apic = vcpu->arch.apic;
2203
2204 preempt_disable();
2205 /* Possibly the TSC deadline timer is not enabled yet */
2206 if (apic->lapic_timer.hv_timer_in_use)
2207 start_sw_timer(apic);
2208 preempt_enable();
2209}
2210
2211void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2212{
2213 struct kvm_lapic *apic = vcpu->arch.apic;
2214
2215 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2216 restart_apic_timer(apic);
2217}
2218
2219static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2220{
2221 atomic_set(&apic->lapic_timer.pending, 0);
2222
2223 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2224 && !set_target_expiration(apic, count_reg))
2225 return;
2226
2227 restart_apic_timer(apic);
2228}
2229
2230static void start_apic_timer(struct kvm_lapic *apic)
2231{
2232 __start_apic_timer(apic, APIC_TMICT);
2233}
2234
2235static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2236{
2237 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2238
2239 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2240 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2241 if (lvt0_in_nmi_mode) {
2242 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2243 } else
2244 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2245 }
2246}
2247
2248static int get_lvt_index(u32 reg)
2249{
2250 if (reg == APIC_LVTCMCI)
2251 return LVT_CMCI;
2252 if (reg < APIC_LVTT || reg > APIC_LVTERR)
2253 return -1;
2254 return array_index_nospec(
2255 (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2256}
2257
static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_xapic_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
		else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;

		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;

			for (i = 0; i < apic->nr_lvt_entries; i++) {
				kvm_lapic_set_reg(apic, APIC_LVTx(i),
					kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
		break;
	}
	case APIC_ICR:
		WARN_ON_ONCE(apic_x2apic_mode(apic));

		/* No delay here, so we always clear the pending bit */
		val &= ~APIC_ICR_BUSY;
		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
		kvm_lapic_set_reg(apic, APIC_ICR, val);
		break;
	case APIC_ICR2:
		if (apic_x2apic_mode(apic))
			ret = 1;
		else
			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		fallthrough;
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_LVTCMCI: {
		u32 index = get_lvt_index(reg);

		if (!kvm_lapic_lvt_supported(apic, index)) {
			ret = 1;
			break;
		}
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);
		break;
	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		cancel_apic_timer(apic);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR: {
		uint32_t old_divisor = apic->divide_count;

		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
		    apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			update_target_expiration(apic, old_divisor);
			restart_apic_timer(apic);
		}
		break;
	}
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0)
			ret = 1;
		break;

	case APIC_SELF_IPI:
		/*
		 * Self-IPI exists only when x2APIC is enabled.  Bits 7:0 hold
		 * the vector, everything else is reserved.
		 */
		if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
			ret = 1;
		else
			kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
		break;
	default:
		ret = 1;
		break;
	}

	/*
	 * Recalculate APIC maps if necessary, e.g. if the software enable bit
	 * was toggled, the APIC ID changed, etc...  The maps are marked dirty
	 * on relevant changes, i.e. this is a nop for most writes.
	 */
	kvm_recalculate_apic_map(apic->vcpu->kvm);

	return ret;
}

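/*
 * MMIO write handler for the xAPIC page.  Writes that arrive while the
 * APIC is hardware-disabled or in x2APIC mode hit a "hole"; whether they
 * are silently dropped or punted back to the caller (-EOPNOTSUPP)
 * depends on the KVM_X86_QUIRK_LAPIC_MMIO_HOLE quirk.
 */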
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers are aligned on a 128-bit boundary, and all
	 * 32/64/128-bit registers must be accessed with 32-bit reads and
	 * writes.  Refer to SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf))
		return 0;

	val = *(u32 *)data;

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/*
	 * ICR is a single 64-bit register when x2APIC is enabled; all other
	 * registers hold 32-bit values.  For legacy xAPIC, ICR writes need to
	 * go down the common path to get the upper half from ICR2.
	 *
	 * Note, using the write helpers may incur an unnecessary write to the
	 * virtual APIC state, but KVM needs to conditionally modify the value
	 * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
	 * conditional branches is likely a wash relative to the cost of the
	 * maybe-unnecessary write, and both are in the noise anyways.
	 */
	if (apic_x2apic_mode(apic) && offset == APIC_ICR)
		kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
	else
		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic) {
		static_branch_dec(&kvm_has_noapic_vcpu);
		return;
	}

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_branch_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_branch_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE)
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
		else if (value & MSR_IA32_APICBASE_ENABLE)
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	    apic->base_address != APIC_DEFAULT_PHYS_BASE) {
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
	}
}

void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic->apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		/*
		 * Don't clear irr_pending, searching the IRR can race with
		 * updates from the CPU as APICv is still active from hardware's
		 * perspective.  The flag will be cleared as appropriate when
		 * KVM injects the interrupt.
		 */
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
	apic->highest_isr_cache = -1;
}

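/*
 * Install the private memslot that backs guest accesses to the default
 * APIC base (APIC_DEFAULT_PHYS_BASE).  The backing page is deliberately
 * not pinned: the reference taken by gfn_to_page() is dropped so that
 * memory hot-unplug can migrate it.
 */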
int kvm_alloc_apic_access_page(struct kvm *kvm)
{
	struct page *page;
	void __user *hva;
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_memslot_enabled ||
	    kvm->arch.apic_access_memslot_inhibited)
		goto out;

	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
	if (IS_ERR(hva)) {
		ret = PTR_ERR(hva);
		goto out;
	}

	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page)) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Do not pin the page in memory, so that memory hot-unplug
	 * is able to migrate it.
	 */
	put_page(page);
	kvm->arch.apic_access_memslot_enabled = true;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);

void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (!kvm->arch.apic_access_memslot_enabled)
		return;

	kvm_vcpu_srcu_read_unlock(vcpu);

	mutex_lock(&kvm->slots_lock);

	if (kvm->arch.apic_access_memslot_enabled) {
		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
		/*
		 * Clear "enabled" after the memslot is deleted so that a
		 * different vCPU doesn't get a false negative when checking
		 * the flag out of slots_lock.  No additional memory barrier is
		 * needed as modifying memslots requires waiting for other vCPUs
		 * to drop SRCU (see above), and false positives are ok as the
		 * flag is rechecked after acquiring slots_lock.
		 */
		kvm->arch.apic_access_memslot_enabled = false;

		/*
		 * Mark the memslot as inhibited to prevent reallocating the
		 * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
		 */
		kvm->arch.apic_access_memslot_inhibited = true;
	}

	mutex_unlock(&kvm->slots_lock);

	kvm_vcpu_srcu_read_lock(vcpu);
}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 msr_val;
	int i;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	if (!init_event) {
		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			msr_val |= MSR_IA32_APICBASE_BSP;
		kvm_lapic_set_base(vcpu, msr_val);
	}

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
	if (!init_event)
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < apic->nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
				  SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	if (!apic_x2apic_mode(apic)) {
		kvm_lapic_set_reg(apic, APIC_ICR, 0);
		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	} else {
		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
	}
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);

	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;
	int r;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;

		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
		if (r && lvt_type == APIC_LVTPC &&
		    guest_cpuid_is_intel_compatible(apic->vcpu))
			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
		return r;
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read = apic_mmio_read,
	.write = apic_mmio_write,
};

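/*
 * hrtimer callback for the emulated LAPIC timer.  Runs in hardirq
 * context (the timer is armed with HRTIMER_MODE_ABS_HARD, see
 * kvm_create_lapic()); for periodic mode the target expiration is
 * advanced and the hrtimer re-armed in place.
 */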
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		static_branch_inc(&kvm_has_noapic_vcpu);
		return 0;
	}

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	if (kvm_x86_ops.alloc_apic_backing_page)
		apic->regs = static_call(kvm_x86_alloc_apic_backing_page)(vcpu);
	else
		apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (lapic_timer_advance)
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;

	/*
	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	/*
	 * Defer evaluating inhibits until the vCPU is first run, as this vCPU
	 * will not get notified of any changes until this vCPU is visible to
	 * other vCPUs (marked online and added to the set of vCPUs).
	 *
	 * Opportunistically mark APICv active as VMX in particular is highly
	 * unlikely to have inhibits.  Ignore the current per-VM APICv state so
	 * that vCPU creation is guaranteed to run with a deterministic value,
	 * the request will ensure the vCPU gets the correct state before VM-Entry.
	 */
	if (enable_apicv) {
		apic->apicv_active = true;
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
	}

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		return 1;
	return 0;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

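/*
 * Convert between the register layout used by KVM_{GET,SET}_LAPIC and
 * KVM's internal x2APIC layout.  When the VM uses the 32-bit x2APIC ID
 * format (x2apic_format), the ID is validated against vcpu_id;
 * otherwise it is shifted to/from the xAPIC position for backwards
 * compatibility.  Likewise the 64-bit ICR is split into ICR+ICR2 on
 * "get" and merged back on "set".
 */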
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
		u64 icr;

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/*
		 * In x2APIC mode, the LDR is fixed and based on the id.  And
		 * ICR is internally a single 64-bit register, but needs to be
		 * split to ICR+ICR2 in userspace for backwards compatibility.
		 */
		if (set) {
			*ldr = kvm_apic_calc_x2apic_ldr(*id);

			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
		} else {
			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
		}
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	cancel_apic_timer(apic);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

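/*
 * Re-arm the hrtimer so that it runs on the pCPU the vCPU has migrated
 * to.  Nothing to do if the timer interrupt can be posted directly, in
 * which case the hrtimer is deliberately left where it was armed.
 */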
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
	    kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the last entry.
 * If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	int vector;

	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));

	if (pv_eoi_test_and_clr_pending(vcpu))
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
				      struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
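	/*
	 * vAPIC word layout: TPR in bits 7:0, the priority class of the
	 * highest in-service vector in bits 15:12, and the highest pending
	 * (IRR) vector in bits 31:24.
	 */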
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.apic->vapic_cache,
					      vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{
	data &= ~APIC_ICR_BUSY;

	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
	trace_kvm_apic_write(APIC_ICR, data);
	return 0;
}

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
	u32 low;

	if (reg == APIC_ICR) {
		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
		return 0;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;

	*data = low;

	return 0;
}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{
	/*
	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such, all other registers remain accessible only
	 * through 32-bit reads/writes.
	 */
	if (reg == APIC_ICR)
		return kvm_x2apic_icr_write(apic, data);

	/* Bits 63:32 are reserved in all other registers. */
	if (data >> 32)
		return 1;

	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

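/*
 * x2APIC MSRs map linearly onto the legacy MMIO register offsets:
 * offset = (msr - APIC_BASE_MSR) << 4.  E.g. MSR 0x830 (ICR) maps to
 * offset 0x300 (APIC_ICR).
 */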
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_write(apic, reg, data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_read(apic, reg, data);
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
}

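/*
 * MSR_KVM_PV_EOI_EN layout: bit 0 (KVM_MSR_ENABLED) enables the feature,
 * the remaining bits hold the guest-physical address of the
 * 4-byte-aligned PV EOI flag word.
 */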
int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;
	int ret;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;
		else
			new_len = len;

		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
		if (ret)
			return ret;
	}

	vcpu->arch.pv_eoi.msr_val = data;

	return 0;
}

int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;

	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * Continue processing INIT/SIPI even if a nested VM-Exit
		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
		 * are blocked as a result of transitioning to VMX root mode.
		 */
	}

	/*
	 * INITs are blocked while CPU is in specific states (SMM, VMX root
	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
	 * wait-for-SIPI (WFS).
	 */
	if (!kvm_apic_init_sipi_allowed(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}

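/*
 * Module-exit teardown: flush the deferred static-key work and warn if
 * either key is still enabled, which would indicate a refcount leak.
 */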
void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
	static_key_deferred_flush(&apic_sw_disabled);
	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}