// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
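
/*
 * 32-bit kernels have no native 64-bit modulo, so mod_64() is open-coded in
 * terms of div64_u64(); 64-bit builds can use the '%' operator directly.
 * Either form yields, e.g., mod_64(10, 3) == 1.
 */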

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			0x14UL
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following defines are not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

/*
 * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
 * tuning. When enabled, KVM programs the host timer event to fire early, i.e.
 * before the deadline expires, to account for the delay between taking the
 * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
 * the guest, i.e. so that the interrupt arrives in the guest with minimal
 * latency relative to the deadline programmed by the guest.
 */
static bool lapic_timer_advance __read_mostly = true;
module_param(lapic_timer_advance, bool, 0444);

#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP	8

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
	*((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	__kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
	return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{
	__kvm_lapic_set_reg64(apic->regs, reg, val);
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
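
/*
 * Vector-to-register math (VEC_POS()/REG_POS() are defined in lapic.h): each
 * bank of 32 vectors lives in a 16-byte-aligned APIC register, so
 * REG_POS(v) == ((v) >> 5) << 4 selects the register and VEC_POS(v) == (v) & 31
 * selects the bit, e.g. vector 0x31 maps to register offset 0x10, bit 17.
 */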

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}
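
/*
 * The x2APIC LDR encodes a 16-bit cluster ID in bits 31:16 and a one-hot
 * position within the cluster in bits 15:0, e.g. x2APIC ID 0x23 yields
 * cluster 2, bit 3, i.e. an LDR of 0x00020008.
 */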

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
	switch (map->logical_mode) {
	case KVM_APIC_MODE_SW_DISABLED:
		/* Arbitrarily use the flat map so that @cluster isn't NULL. */
		*cluster = map->xapic_flat_map;
		*mask = 0;
		return true;
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
	}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	case KVM_APIC_MODE_MAP_DISABLED:
		return false;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
				    struct kvm_vcpu *vcpu,
				    bool *xapic_id_mismatch)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 x2apic_id = kvm_x2apic_id(apic);
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;

	/*
	 * For simplicity, KVM always allocates enough space for all possible
	 * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
	 * without the optimized map.
	 */
	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
		return -EINVAL;

	/*
	 * Bail if a vCPU was added and/or enabled its APIC between allocating
	 * the map and doing the actual calculations for the map. Note, KVM
	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
	 * the compiler decides to reload x2apic_id after this check.
	 */
	if (x2apic_id > new->max_apic_id)
		return -E2BIG;

	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
	 * 32-bit value. Any unwanted aliasing due to truncation will be
	 * detected below.
	 */
	if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
		*xapic_id_mismatch = true;

	/*
	 * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
	 * Allow sending events to vCPUs by their x2APIC ID even if the target
	 * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
	 * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
	 * and collide).
	 *
	 * Honor the architectural (and KVM's non-optimized) behavior if
	 * userspace has not enabled 32-bit x2APIC IDs. Each APIC is supposed
	 * to process messages independently. If multiple vCPUs have the same
	 * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
	 * manually modified its xAPIC IDs, events targeting that ID are
	 * supposed to be recognized by all vCPUs with said ID.
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;

		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;
	} else {
		/*
		 * Disable the optimized map if the physical APIC ID is already
		 * mapped, i.e. is aliased to multiple vCPUs. The optimized
		 * map requires a strict 1:1 mapping between IDs and vCPUs.
		 */
		if (apic_x2apic_mode(apic))
			physical_id = x2apic_id;
		else
			physical_id = xapic_id;

		if (new->phys_map[physical_id])
			return -EINVAL;

		new->phys_map[physical_id] = apic;
	}

	return 0;
}

static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
					struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	enum kvm_apic_logical_mode logical_mode;
	struct kvm_lapic **cluster;
	u16 mask;
	u32 ldr;

	if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		return;

	if (!kvm_apic_sw_enabled(apic))
		return;

	ldr = kvm_lapic_get_reg(apic, APIC_LDR);
	if (!ldr)
		return;

	if (apic_x2apic_mode(apic)) {
		logical_mode = KVM_APIC_MODE_X2APIC;
	} else {
		ldr = GET_APIC_LOGICAL_ID(ldr);
		if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
			logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
		else
			logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
	}

	/*
	 * To optimize logical mode delivery, all software-enabled APICs must
	 * be configured for the same mode.
	 */
	if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
		new->logical_mode = logical_mode;
	} else if (new->logical_mode != logical_mode) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	/*
	 * In x2APIC mode, the LDR is read-only and derived directly from the
	 * x2APIC ID, and thus is guaranteed to be addressable. KVM reuses
	 * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
	 * reversing the LDR calculation to get the cluster of APICs, i.e. no
	 * additional work is required.
	 */
	if (apic_x2apic_mode(apic))
		return;

	if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
							&cluster, &mask))) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	if (!mask)
		return;

	ldr = ffs(mask) - 1;
	if (!is_power_of_2(mask) || cluster[ldr])
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
	else
		cluster[ldr] = apic;
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};

static void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 max_id = 255; /* enough space for any xAPIC ID */
	bool xapic_id_mismatch;
	int r;

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);

retry:
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
	 * or the APIC registers (if dirty). Note, on retry the map may have
	 * not yet been marked dirty by whatever task changed a vCPU's x2APIC
	 * ID, i.e. the map may still show up as in-progress. In that case
	 * this task still needs to retry and complete its calculation.
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	/*
	 * Reset the mismatch flag between attempts so that KVM does the right
	 * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
	 * keep max_id strictly increasing. Disallowing max_id from shrinking
	 * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
	 * with the highest x2APIC ID is toggling its APIC on and off.
	 */
	xapic_id_mismatch = false;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
			   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
			   GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;
	new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
		if (r) {
			kvfree(new);
			new = NULL;
			if (r == -E2BIG) {
				cond_resched();
				goto retry;
			}

			goto out;
		}

		kvm_recalculate_logical_map(new, vcpu);
	}
out:
	/*
	 * The optimized map is effectively KVM's internal version of APICv,
	 * and all unwanted aliasing that results in disabling the optimized
	 * map also applies to APICv.
	 */
	if (!new)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

	if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

	if (xapic_id_mismatch)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled) {
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
		kvm_xen_sw_enable_lapic(apic->vcpu);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
	return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = 0;

	if (!lapic_in_kernel(vcpu))
		return;

	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

	/*
	 * KVM's in-kernel IOAPIC emulates the 82093AA, which doesn't have an
	 * EOI register. Some buggy OSes (e.g. Windows with the Hyper-V role)
	 * disable EOI broadcast in the LAPIC without first checking the
	 * IOAPIC version, in which case level-triggered interrupts would
	 * never get EOIed in the IOAPIC. So only advertise directed EOI when
	 * the in-kernel IOAPIC is not in use.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
		return;

	/* Initialize/mask any "new" LVT entries. */
	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

	apic->nr_lvt_entries = nr_lvt_entries;

	/* The number of LVT entries is reflected in the version register. */
	kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,      /* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

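/*
 * Merge bits pending in the posted-interrupt request bitmap (@pir) into the
 * vAPIC IRR in @regs, atomically consuming each non-zero PIR chunk and
 * cmpxchg'ing it into the corresponding IRR register. Returns true if at
 * least one new bit was set and the highest newly-set vector is also the
 * highest vector now pending in the IRR.
 */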
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

		irr_val = *p_irr;
		pir_val = READ_ONCE(pir[i]);

		if (pir_val) {
			pir_val = xchg(&pir[i], 0);

			prev_irr_val = irr_val;
			do {
				irr_val = prev_irr_val | pir_val;
			} while (prev_irr_val != irr_val &&
				 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

			if (prev_irr_val != irr_val)
				max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);

	if (unlikely(!apic->apicv_active && irr_updated))
		apic->irr_pending = true;
	return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It is always true when
	 * virtual interrupt delivery is enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	if (unlikely(apic->apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_x86_call(hwapic_irr_update)(apic->vcpu,
						apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood. Instead
	 * just set SVI.
	 */
	if (unlikely(apic->apicv_active))
		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * An ISR (in-service register) bit is set when an interrupt
		 * is injected, and the highest-priority vector is always the
		 * one injected, so the most recently set bit is also the
		 * highest bit in the ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment. In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(apic->apicv_active))
		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
		return;

	kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
}
EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/*
	 * This may race with the setting of irr in __apic_accept_irq(), and
	 * the value returned may be stale, but kvm_vcpu_kick() in
	 * __apic_accept_irq() will cause an immediate vmexit and the value
	 * will be recalculated on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
				 irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
		return;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		return false;

	val &= KVM_PV_EOI_ENABLED;

	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
		return false;

	/*
	 * Clear the pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from a performance point of view, it
	 * makes sure PV EOI is only enabled when we know it's safe.
	 */
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;

	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}
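
/*
 * Per the SDM, PPR = TPR if TPR[7:4] >= ISRV[7:4], else ISRV & 0xf0, where
 * ISRV is the highest in-service vector. E.g. with TPR = 0x30 and vector
 * 0x51 in service, ISRV[7:4] = 5 > 3, so PPR = 0x50.
 */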

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	/*
	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
	 * were in x2APIC mode if the target APIC ID can't be encoded as an
	 * xAPIC ID. This allows unique addressing of hotplugged vCPUs (which
	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
	 * mode. Match the x2APIC ID if and only if the target APIC ID can't
	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
	 * changed its (addressable) xAPIC ID (which is writable).
	 */
	if (apic_x2apic_mode(apic) || mda > 0xff)
		return mda == kvm_x2apic_id(apic);

	return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}

/*
 * The KVM local APIC implementation has two quirks:
 *
 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *   in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *   KVM doesn't do that aliasing.
 *
 * - in-kernel IOAPIC messages have to be delivered directly to
 *   x2APIC, because the kernel does not support interrupt remapping.
 *   In order to support broadcast without interrupt remapping, x2APIC
 *   rewrites the destination of non-IPI messages from APIC_BROADCAST
 *   to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
			struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
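
/*
 * Vector hashing, illustrated: the (vector % dest_vcpus)-th set bit in
 * @bitmap is chosen, e.g. vector 0x26 with three candidate destinations
 * selects the third set bit (0x26 % 3 == 2, counting from zero).
 */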

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		pr_info("Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
		     map->logical_mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

		if (irq->dest_id == (x2apic_ipi ?
				     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/*
 * Return true if the interrupt can be handled using *bitmap as an index mask
 * for valid destinations in the *dst array.
 * Return false if kvm_apic_map_get_dest_lapic() did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped. In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
			struct kvm_lapic **src, struct kvm_lapic_irq *irq,
			struct kvm_apic_map *map, struct kvm_lapic ***dst,
			unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {
			*r = 0;
			return true;
		}
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
	    hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
						trig_mode, vector);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		if (!kvm_inject_smi(vcpu)) {
			kvm_vcpu_kick(vcpu);
			result = 1;
		}
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

/*
 * This routine identifies the destination vCPUs meant to receive an IOAPIC
 * interrupt. It either uses kvm_apic_map_get_dest_lapic() to find the
 * destination vCPU array and set the bitmap, or it traverses each available
 * vCPU to identify them.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;
	int vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit; one example is
	 * when the kernel checks the timer in setup_IO_APIC().
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (kvm_hv_synic_has_vector(apic->vcpu, vector))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
	struct kvm_lapic_irq irq;

	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	return div64_u64(ns, (apic->vcpu->kvm->arch.apic_bus_cycle_ns *
			      apic->divide_count));
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
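
/*
 * APIC registers are 16-byte aligned, so "reg >> 4" yields a unique bit per
 * register in a 64-bit mask, e.g. APIC_REG_MASK(APIC_SPIV) with APIC_SPIV ==
 * 0xF0 is bit 15, and APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) covers the
 * consecutive ISR registers.
 */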

u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
{
	/* Leave bits '0' for reserved and write-only registers. */
	u64 valid_reg_mask =
		APIC_REG_MASK(APIC_ID) |
		APIC_REG_MASK(APIC_LVR) |
		APIC_REG_MASK(APIC_TASKPRI) |
		APIC_REG_MASK(APIC_PROCPRI) |
		APIC_REG_MASK(APIC_LDR) |
		APIC_REG_MASK(APIC_SPIV) |
		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
		APIC_REG_MASK(APIC_ESR) |
		APIC_REG_MASK(APIC_ICR) |
		APIC_REG_MASK(APIC_LVTT) |
		APIC_REG_MASK(APIC_LVTTHMR) |
		APIC_REG_MASK(APIC_LVTPC) |
		APIC_REG_MASK(APIC_LVT0) |
		APIC_REG_MASK(APIC_LVT1) |
		APIC_REG_MASK(APIC_LVTERR) |
		APIC_REG_MASK(APIC_TMICT) |
		APIC_REG_MASK(APIC_TMCCT) |
		APIC_REG_MASK(APIC_TDCR);

	if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
		valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);

	/* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
	if (!apic_x2apic_mode(apic))
		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
				  APIC_REG_MASK(APIC_DFR) |
				  APIC_REG_MASK(APIC_ICR2);

	return valid_reg_mask;
}
EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);

static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;

	/*
	 * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
	 * x2APIC and needs to be manually handled by the caller.
	 */
	WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);

	if (alignment + len > 4)
		return 1;

	if (offset > 0x3f0 ||
	    !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
		return 1;

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1, 2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);
}
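
/*
 * The TDCR encodes the divisor in bits 0, 1, and 3 (bit 2 is reserved);
 * packing those three bits together gives log2(divisor) - 1, wrapping so
 * that 0b111 means divide-by-1. E.g. TDCR = 0x0 selects divide-by-2 and
 * TDCR = 0xb selects divide-by-1.
 */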

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (apic->lapic_timer.period < min_period) {
			pr_info_once(
			    "vcpu %i: requested %lld ns "
			    "lapic timer period limited to %lld ns\n",
			    apic->vcpu->vcpu_id,
			    apic->lapic_timer.period, min_period);
			apic->lapic_timer.period = min_period;
		}
	}
}

static void cancel_hv_timer(struct kvm_lapic *apic);

static void cancel_apic_timer(struct kvm_lapic *apic)
{
	hrtimer_cancel(&apic->lapic_timer.timer);
	preempt_disable();
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	preempt_enable();
	atomic_set(&apic->lapic_timer.pending, 0);
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
				APIC_LVT_TIMER_TSCDEADLINE)) {
			cancel_apic_timer(apic);
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
			apic->lapic_timer.period = 0;
			apic->lapic_timer.tscdeadline = 0;
		}
		apic->lapic_timer.timer_mode = timer_mode;
		limit_periodic_timer_frequency(apic);
	}
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (apic->apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

	/*
	 * If the guest TSC is running at a different ratio than the host, then
	 * convert the delay to nanoseconds to achieve an accurate delay. Note
	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
	 * always for VMX enabled hardware.
	 */
	if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
		__delay(min(guest_cycles,
			nsec_to_cycles(vcpu, timer_advance_ns)));
	} else {
		u64 delay_ns = guest_cycles * 1000000ULL;

		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
		ndelay(min_t(u32, delay_ns, timer_advance_ns));
	}
}

static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
					      s64 advance_expire_delta)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
	u64 ns;

	/* Do not adjust for tiny fluctuations or large random spikes. */
	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
		return;

	/* too early */
	if (advance_expire_delta < 0) {
		ns = -advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns -= ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	} else {
		/* too late */
		ns = advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns += ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	}

	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}

static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);

	/*
	 * If the timer fired early, reread the TSC to account for the overhead
	 * of the above adjustment to avoid waiting longer than is necessary.
	 */
	if (guest_tsc < tsc_deadline)
		guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	if (guest_tsc < tsc_deadline)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
}

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);

static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	kvm_apic_local_deliver(apic, APIC_LVTT);
	if (apic_lvtt_tscdeadline(apic)) {
		ktimer->tscdeadline = 0;
	} else if (apic_lvtt_oneshot(apic)) {
		ktimer->tscdeadline = 0;
		ktimer->target_expiration = 0;
	}
}

static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
		ktimer->expired_tscdeadline = ktimer->tscdeadline;

	if (!from_timer_fn && apic->apicv_active) {
		WARN_ON(kvm_get_running_vcpu() != vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
		/*
		 * Ensure the guest's timer has truly expired before posting an
		 * interrupt. Open code the relevant checks to avoid querying
		 * lapic_timer_int_injected(), which will be false since the
		 * interrupt isn't yet injected. Waiting until after injecting
		 * is not an option since that won't help a posted interrupt.
		 */
		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
			__kvm_wait_lapic_expire(vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	atomic_inc(&apic->lapic_timer.pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	if (from_timer_fn)
		kvm_vcpu_kick(vcpu);
}
1950
1951static void start_sw_tscdeadline(struct kvm_lapic *apic)
1952{
1953 struct kvm_timer *ktimer = &apic->lapic_timer;
1954 u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1955 u64 ns = 0;
1956 ktime_t expire;
1957 struct kvm_vcpu *vcpu = apic->vcpu;
1958 u32 this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1959 unsigned long flags;
1960 ktime_t now;
1961
1962 if (unlikely(!tscdeadline || !this_tsc_khz))
1963 return;
1964
1965 local_irq_save(flags);
1966
1967 now = ktime_get();
1968 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1969
1970 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1971 do_div(ns, this_tsc_khz);
1972
1973 if (likely(tscdeadline > guest_tsc) &&
1974 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1975 expire = ktime_add_ns(now, ns);
1976 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1977 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1978 } else
1979 apic_timer_expired(apic, false);
1980
1981 local_irq_restore(flags);
1982}
1983
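/*
 * Convert an initial/current count register value to nanoseconds: each
 * count spans apic_bus_cycle_ns nanoseconds, scaled by the divide
 * configuration (TDCR).
 */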
1984static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1985{
1986 return (u64)tmict * apic->vcpu->kvm->arch.apic_bus_cycle_ns *
1987 (u64)apic->divide_count;
1988}
1989
1990static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1991{
1992 ktime_t now, remaining;
1993 u64 ns_remaining_old, ns_remaining_new;
1994
1995 apic->lapic_timer.period =
1996 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1997 limit_periodic_timer_frequency(apic);
1998
1999 now = ktime_get();
2000 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
2001 if (ktime_to_ns(remaining) < 0)
2002 remaining = 0;
2003
2004 ns_remaining_old = ktime_to_ns(remaining);
2005 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
2006 apic->divide_count, old_divisor);
2007
2008 apic->lapic_timer.tscdeadline +=
2009 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
2010 nsec_to_cycles(apic->vcpu, ns_remaining_old);
2011 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
2012}
2013
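/*
 * Compute the timer's expiration in both time domains: a ktime target for
 * the hrtimer-based software timer and a guest-TSC deadline for the
 * hardware-assisted timer. count_reg is normally APIC_TMICT; state restore
 * passes APIC_TMCCT to resume from the saved current count. Returns false
 * if the count is zero, i.e. if the timer should not run.
 */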
2014static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
2015{
2016 ktime_t now;
2017 u64 tscl = rdtsc();
2018 s64 deadline;
2019
2020 now = ktime_get();
2021 apic->lapic_timer.period =
2022 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2023
2024 if (!apic->lapic_timer.period) {
2025 apic->lapic_timer.tscdeadline = 0;
2026 return false;
2027 }
2028
2029 limit_periodic_timer_frequency(apic);
2030 deadline = apic->lapic_timer.period;
2031
2032 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2033 if (unlikely(count_reg != APIC_TMICT)) {
2034 deadline = tmict_to_ns(apic,
2035 kvm_lapic_get_reg(apic, count_reg));
2036 if (unlikely(deadline <= 0)) {
2037 if (apic_lvtt_period(apic))
2038 deadline = apic->lapic_timer.period;
2039 else
2040 deadline = 0;
2041 }
2042 else if (unlikely(deadline > apic->lapic_timer.period)) {
2043 pr_info_ratelimited(
2044 "vcpu %i: requested lapic timer restore with "
2045 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
2046 "Using initial count to start timer.\n",
2047 apic->vcpu->vcpu_id,
2048 count_reg,
2049 kvm_lapic_get_reg(apic, count_reg),
2050 deadline, apic->lapic_timer.period);
2051 kvm_lapic_set_reg(apic, count_reg, 0);
2052 deadline = apic->lapic_timer.period;
2053 }
2054 }
2055 }
2056
2057 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2058 nsec_to_cycles(apic->vcpu, deadline);
2059 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2060
2061 return true;
2062}
2063
2064static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2065{
2066 ktime_t now = ktime_get();
2067 u64 tscl = rdtsc();
2068 ktime_t delta;
2069
2070 /*
2071 * Synchronize both deadlines to the same time source or
2072 * differences in the periods (caused by differences in the
2073 * underlying clocks or numerical approximation errors) will
2074 * cause the two to drift apart over time as the errors
2075 * accumulate.
2076 */
2077 apic->lapic_timer.target_expiration =
2078 ktime_add_ns(apic->lapic_timer.target_expiration,
2079 apic->lapic_timer.period);
2080 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2081 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2082 nsec_to_cycles(apic->vcpu, delta);
2083}
2084
2085static void start_sw_period(struct kvm_lapic *apic)
2086{
2087 if (!apic->lapic_timer.period)
2088 return;
2089
2090 if (ktime_after(ktime_get(),
2091 apic->lapic_timer.target_expiration)) {
2092 apic_timer_expired(apic, false);
2093
2094 if (apic_lvtt_oneshot(apic))
2095 return;
2096
2097 advance_periodic_target_expiration(apic);
2098 }
2099
2100 hrtimer_start(&apic->lapic_timer.timer,
2101 apic->lapic_timer.target_expiration,
2102 HRTIMER_MODE_ABS_HARD);
2103}
2104
2105bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
2106{
2107 if (!lapic_in_kernel(vcpu))
2108 return false;
2109
2110 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2111}
2112
2113static void cancel_hv_timer(struct kvm_lapic *apic)
2114{
2115 WARN_ON(preemptible());
2116 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2117 kvm_x86_call(cancel_hv_timer)(apic->vcpu);
2118 apic->lapic_timer.hv_timer_in_use = false;
2119}
2120
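/*
 * Try to emulate the guest's timer with a hardware-assisted timer, e.g.
 * the VMX preemption timer. Returns false if the hv timer is unavailable,
 * in which case the caller falls back to a software hrtimer.
 */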
2121static bool start_hv_timer(struct kvm_lapic *apic)
2122{
2123 struct kvm_timer *ktimer = &apic->lapic_timer;
2124 struct kvm_vcpu *vcpu = apic->vcpu;
2125 bool expired;
2126
2127 WARN_ON(preemptible());
2128 if (!kvm_can_use_hv_timer(vcpu))
2129 return false;
2130
2131 if (!ktimer->tscdeadline)
2132 return false;
2133
2134 if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
2135 return false;
2136
2137 ktimer->hv_timer_in_use = true;
2138 hrtimer_cancel(&ktimer->timer);
2139
2140 /*
2141 * To simplify handling the periodic timer, leave the hv timer running
2142 * even if the deadline timer has expired, i.e. rely on the resulting
2143 * VM-Exit to recompute the periodic timer's target expiration.
2144 */
2145 if (!apic_lvtt_period(apic)) {
2146 /*
2147 * Cancel the hv timer if the sw timer fired while the hv timer
2148 * was being programmed, or if the hv timer itself expired.
2149 */
2150 if (atomic_read(&ktimer->pending)) {
2151 cancel_hv_timer(apic);
2152 } else if (expired) {
2153 apic_timer_expired(apic, false);
2154 cancel_hv_timer(apic);
2155 }
2156 }
2157
2158 trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2159
2160 return true;
2161}
2162
2163static void start_sw_timer(struct kvm_lapic *apic)
2164{
2165 struct kvm_timer *ktimer = &apic->lapic_timer;
2166
2167 WARN_ON(preemptible());
2168 if (apic->lapic_timer.hv_timer_in_use)
2169 cancel_hv_timer(apic);
2170 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2171 return;
2172
2173 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2174 start_sw_period(apic);
2175 else if (apic_lvtt_tscdeadline(apic))
2176 start_sw_tscdeadline(apic);
2177 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2178}
2179
2180static void restart_apic_timer(struct kvm_lapic *apic)
2181{
2182 preempt_disable();
2183
2184 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2185 goto out;
2186
2187 if (!start_hv_timer(apic))
2188 start_sw_timer(apic);
2189out:
2190 preempt_enable();
2191}
2192
2193void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
2194{
2195 struct kvm_lapic *apic = vcpu->arch.apic;
2196
2197 preempt_disable();
2198 /* If the preempt notifier has already run, it also called apic_timer_expired */
2199 if (!apic->lapic_timer.hv_timer_in_use)
2200 goto out;
2201 WARN_ON(kvm_vcpu_is_blocking(vcpu));
2202 apic_timer_expired(apic, false);
2203 cancel_hv_timer(apic);
2204
2205 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2206 advance_periodic_target_expiration(apic);
2207 restart_apic_timer(apic);
2208 }
2209out:
2210 preempt_enable();
2211}
2212EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2213
2214void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2215{
2216 restart_apic_timer(vcpu->arch.apic);
2217}
2218
2219void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2220{
2221 struct kvm_lapic *apic = vcpu->arch.apic;
2222
2223 preempt_disable();
2224 /* Possibly the TSC deadline timer is not enabled yet */
2225 if (apic->lapic_timer.hv_timer_in_use)
2226 start_sw_timer(apic);
2227 preempt_enable();
2228}
2229
2230void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2231{
2232 struct kvm_lapic *apic = vcpu->arch.apic;
2233
2234 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2235 restart_apic_timer(apic);
2236}
2237
2238static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2239{
2240 atomic_set(&apic->lapic_timer.pending, 0);
2241
2242 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2243 && !set_target_expiration(apic, count_reg))
2244 return;
2245
2246 restart_apic_timer(apic);
2247}
2248
2249static void start_apic_timer(struct kvm_lapic *apic)
2250{
2251 __start_apic_timer(apic, APIC_TMICT);
2252}
2253
2254static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2255{
2256 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2257
2258 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2259 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2260		if (lvt0_in_nmi_mode)
2261			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2262		else
2263			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2264 }
2265}
2266
2267static int get_lvt_index(u32 reg)
2268{
2269 if (reg == APIC_LVTCMCI)
2270 return LVT_CMCI;
2271 if (reg < APIC_LVTT || reg > APIC_LVTERR)
2272 return -1;
2273 return array_index_nospec(
2274 (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2275}
2276
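/*
 * Emulate a 32-bit write to a local APIC register. Returns 0 on success,
 * 1 on an illegal access, e.g. a write to a read-only register or to an
 * xAPIC-only register in x2APIC mode.
 */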
2277static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2278{
2279 int ret = 0;
2280
2281 trace_kvm_apic_write(reg, val);
2282
2283 switch (reg) {
2284 case APIC_ID: /* Local APIC ID */
2285 if (!apic_x2apic_mode(apic)) {
2286 kvm_apic_set_xapic_id(apic, val >> 24);
2287 } else {
2288 ret = 1;
2289 }
2290 break;
2291
2292 case APIC_TASKPRI:
2293 report_tpr_access(apic, true);
2294 apic_set_tpr(apic, val & 0xff);
2295 break;
2296
2297 case APIC_EOI:
2298 apic_set_eoi(apic);
2299 break;
2300
2301 case APIC_LDR:
2302 if (!apic_x2apic_mode(apic))
2303 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2304 else
2305 ret = 1;
2306 break;
2307
2308 case APIC_DFR:
2309 if (!apic_x2apic_mode(apic))
2310 kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2311 else
2312 ret = 1;
2313 break;
2314
2315 case APIC_SPIV: {
2316 u32 mask = 0x3ff;
2317 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2318 mask |= APIC_SPIV_DIRECTED_EOI;
2319 apic_set_spiv(apic, val & mask);
2320 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2321 int i;
2322
2323 for (i = 0; i < apic->nr_lvt_entries; i++) {
2324 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2325 kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2326 }
2327 apic_update_lvtt(apic);
2328 atomic_set(&apic->lapic_timer.pending, 0);
2329
2330 }
2331 break;
2332 }
2333 case APIC_ICR:
2334 WARN_ON_ONCE(apic_x2apic_mode(apic));
2335
2336 /* No delay here, so we always clear the pending bit */
2337 val &= ~APIC_ICR_BUSY;
2338 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2339 kvm_lapic_set_reg(apic, APIC_ICR, val);
2340 break;
2341 case APIC_ICR2:
2342 if (apic_x2apic_mode(apic))
2343 ret = 1;
2344 else
2345 kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2346 break;
2347
2348 case APIC_LVT0:
2349 apic_manage_nmi_watchdog(apic, val);
2350 fallthrough;
2351 case APIC_LVTTHMR:
2352 case APIC_LVTPC:
2353 case APIC_LVT1:
2354 case APIC_LVTERR:
2355 case APIC_LVTCMCI: {
2356 u32 index = get_lvt_index(reg);
2357 if (!kvm_lapic_lvt_supported(apic, index)) {
2358 ret = 1;
2359 break;
2360 }
2361 if (!kvm_apic_sw_enabled(apic))
2362 val |= APIC_LVT_MASKED;
2363 val &= apic_lvt_mask[index];
2364 kvm_lapic_set_reg(apic, reg, val);
2365 break;
2366 }
2367
2368 case APIC_LVTT:
2369 if (!kvm_apic_sw_enabled(apic))
2370 val |= APIC_LVT_MASKED;
2371 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2372 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2373 apic_update_lvtt(apic);
2374 break;
2375
2376 case APIC_TMICT:
2377 if (apic_lvtt_tscdeadline(apic))
2378 break;
2379
2380 cancel_apic_timer(apic);
2381 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2382 start_apic_timer(apic);
2383 break;
2384
2385 case APIC_TDCR: {
2386 uint32_t old_divisor = apic->divide_count;
2387
2388 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2389 update_divide_count(apic);
2390 if (apic->divide_count != old_divisor &&
2391 apic->lapic_timer.period) {
2392 hrtimer_cancel(&apic->lapic_timer.timer);
2393 update_target_expiration(apic, old_divisor);
2394 restart_apic_timer(apic);
2395 }
2396 break;
2397 }
2398 case APIC_ESR:
2399 if (apic_x2apic_mode(apic) && val != 0)
2400 ret = 1;
2401 break;
2402
2403 case APIC_SELF_IPI:
2404 /*
2405 * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold
2406 * the vector, everything else is reserved.
2407 */
2408 if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2409 ret = 1;
2410 else
2411 kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2412 break;
2413 default:
2414 ret = 1;
2415 break;
2416 }
2417
2418 /*
2419 * Recalculate APIC maps if necessary, e.g. if the software enable bit
2420 * was toggled, the APIC ID changed, etc... The maps are marked dirty
2421 * on relevant changes, i.e. this is a nop for most writes.
2422 */
2423 kvm_recalculate_apic_map(apic->vcpu->kvm);
2424
2425 return ret;
2426}
2427
2428static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2429 gpa_t address, int len, const void *data)
2430{
2431 struct kvm_lapic *apic = to_lapic(this);
2432 unsigned int offset = address - apic->base_address;
2433 u32 val;
2434
2435 if (!apic_mmio_in_range(apic, address))
2436 return -EOPNOTSUPP;
2437
2438 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2439 if (!kvm_check_has_quirk(vcpu->kvm,
2440 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2441 return -EOPNOTSUPP;
2442
2443 return 0;
2444 }
2445
2446 /*
2447	 * APIC registers must be aligned on a 128-bit boundary.
2448	 * 32/64/128-bit registers must be accessed via 32-bit reads and writes.
2449	 * Refer to SDM 8.4.1.
2450 */
2451 if (len != 4 || (offset & 0xf))
2452 return 0;
2453
2454 val = *(u32*)data;
2455
2456 kvm_lapic_reg_write(apic, offset & 0xff0, val);
2457
2458 return 0;
2459}
2460
2461void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2462{
2463 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2464}
2465EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2466
2467#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
2468
2469int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
2470{
2471 if (data & X2APIC_ICR_RESERVED_BITS)
2472 return 1;
2473
2474 /*
2475 * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
2476 * only AMD requires it to be zero, Intel essentially just ignores the
2477 * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
2478 * the CPU performs the reserved bits checks, i.e. the underlying CPU
2479 * behavior will "win". Arbitrarily clear the BUSY bit, as there is no
2480 * sane way to provide consistent behavior with respect to hardware.
2481 */
2482 data &= ~APIC_ICR_BUSY;
2483
2484 kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
2485 if (kvm_x86_ops.x2apic_icr_is_split) {
2486 kvm_lapic_set_reg(apic, APIC_ICR, data);
2487 kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
2488 } else {
2489 kvm_lapic_set_reg64(apic, APIC_ICR, data);
2490 }
2491 trace_kvm_apic_write(APIC_ICR, data);
2492 return 0;
2493}
2494
2495static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
2496{
2497 if (kvm_x86_ops.x2apic_icr_is_split)
2498 return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
2499 (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
2500
2501 return kvm_lapic_get_reg64(apic, APIC_ICR);
2502}
2503
2504/* emulate APIC access in a trap manner */
2505void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2506{
2507 struct kvm_lapic *apic = vcpu->arch.apic;
2508
2509 /*
2510 * ICR is a single 64-bit register when x2APIC is enabled, all others
2511	 * ICR is a single 64-bit register when x2APIC is enabled, all other
2512 * go down the common path to get the upper half from ICR2.
2513 *
2514 * Note, using the write helpers may incur an unnecessary write to the
2515 * virtual APIC state, but KVM needs to conditionally modify the value
2516 * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
2517 * conditional branches is likely a wash relative to the cost of the
2518 * maybe-unecessary write, and both are in the noise anyways.
2519	 * maybe-unnecessary write, and both are in the noise anyway.
2520 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2521 WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
2522 else
2523 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2524}
2525EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2526
2527void kvm_free_lapic(struct kvm_vcpu *vcpu)
2528{
2529 struct kvm_lapic *apic = vcpu->arch.apic;
2530
2531 if (!vcpu->arch.apic) {
2532 static_branch_dec(&kvm_has_noapic_vcpu);
2533 return;
2534 }
2535
2536 hrtimer_cancel(&apic->lapic_timer.timer);
2537
2538 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2539 static_branch_slow_dec_deferred(&apic_hw_disabled);
2540
2541 if (!apic->sw_enabled)
2542 static_branch_slow_dec_deferred(&apic_sw_disabled);
2543
2544 if (apic->regs)
2545 free_page((unsigned long)apic->regs);
2546
2547 kfree(apic);
2548}
2549
2550/*
2551 *----------------------------------------------------------------------
2552 * LAPIC interface
2553 *----------------------------------------------------------------------
2554 */
2555u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2556{
2557 struct kvm_lapic *apic = vcpu->arch.apic;
2558
2559 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2560 return 0;
2561
2562 return apic->lapic_timer.tscdeadline;
2563}
2564
2565void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2566{
2567 struct kvm_lapic *apic = vcpu->arch.apic;
2568
2569 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2570 return;
2571
2572 hrtimer_cancel(&apic->lapic_timer.timer);
2573 apic->lapic_timer.tscdeadline = data;
2574 start_apic_timer(apic);
2575}
2576
2577void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2578{
2579 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2580}
2581
2582u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2583{
2584 u64 tpr;
2585
2586 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2587
2588 return (tpr & 0xf0) >> 4;
2589}
2590
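/*
 * Update the APIC base MSR without validation: toggle the apic_hw_disabled
 * jump label, refresh the xAPIC/x2APIC ID as needed, and inhibit APICv if
 * the base is moved away from the default address.
 */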
2591static void __kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value)
2592{
2593 u64 old_value = vcpu->arch.apic_base;
2594 struct kvm_lapic *apic = vcpu->arch.apic;
2595
2596 vcpu->arch.apic_base = value;
2597
2598 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2599 kvm_update_cpuid_runtime(vcpu);
2600
2601 if (!apic)
2602 return;
2603
2604 /* update jump label if enable bit changes */
2605 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2606 if (value & MSR_IA32_APICBASE_ENABLE) {
2607 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2608 static_branch_slow_dec_deferred(&apic_hw_disabled);
2609 /* Check if there are APF page ready requests pending */
2610 kvm_make_request(KVM_REQ_APF_READY, vcpu);
2611 } else {
2612 static_branch_inc(&apic_hw_disabled.key);
2613 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2614 }
2615 }
2616
2617 if ((old_value ^ value) & X2APIC_ENABLE) {
2618 if (value & X2APIC_ENABLE)
2619 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2620 else if (value & MSR_IA32_APICBASE_ENABLE)
2621 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2622 }
2623
2624 if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
2625 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2626 kvm_x86_call(set_virtual_apic_mode)(vcpu);
2627 }
2628
2629 apic->base_address = apic->vcpu->arch.apic_base &
2630 MSR_IA32_APICBASE_BASE;
2631
2632 if ((value & MSR_IA32_APICBASE_ENABLE) &&
2633 apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2634 kvm_set_apicv_inhibit(apic->vcpu->kvm,
2635 APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2636 }
2637}
2638
2639int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated)
2640{
2641 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
2642 enum lapic_mode new_mode = kvm_apic_mode(value);
2643
2644 if (vcpu->arch.apic_base == value)
2645 return 0;
2646
2647 u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
2648 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
2649
2650 if ((value & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
2651 return 1;
2652 if (!host_initiated) {
2653 if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
2654 return 1;
2655 if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
2656 return 1;
2657 }
2658
2659 __kvm_apic_set_base(vcpu, value);
2660 kvm_recalculate_apic_map(vcpu->kvm);
2661 return 0;
2662}
2663
2664void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2665{
2666 struct kvm_lapic *apic = vcpu->arch.apic;
2667
2668 /*
2669 * When APICv is enabled, KVM must always search the IRR for a pending
2670 * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
2671 * isn't running. If APICv is disabled, KVM _should_ search the IRR
2672 * for a pending IRQ. But KVM currently doesn't ensure *all* hardware,
2673 * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
2674 * the IRR at this time could race with IRQ delivery from hardware that
2675 * still sees APICv as being enabled.
2676 *
2677 * FIXME: Ensure other vCPUs and devices observe the change in APICv
2678 * state prior to updating KVM's metadata caches, so that KVM
2679 * can safely search the IRR and set irr_pending accordingly.
2680 */
2681 apic->irr_pending = true;
2682
2683 if (apic->apicv_active)
2684 apic->isr_count = 1;
2685 else
2686 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2687
2688 apic->highest_isr_cache = -1;
2689}
2690
2691int kvm_alloc_apic_access_page(struct kvm *kvm)
2692{
2693 void __user *hva;
2694 int ret = 0;
2695
2696 mutex_lock(&kvm->slots_lock);
2697 if (kvm->arch.apic_access_memslot_enabled ||
2698 kvm->arch.apic_access_memslot_inhibited)
2699 goto out;
2700
2701 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2702 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
2703 if (IS_ERR(hva)) {
2704 ret = PTR_ERR(hva);
2705 goto out;
2706 }
2707
2708 kvm->arch.apic_access_memslot_enabled = true;
2709out:
2710 mutex_unlock(&kvm->slots_lock);
2711 return ret;
2712}
2713EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
2714
2715void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
2716{
2717 struct kvm *kvm = vcpu->kvm;
2718
2719 if (!kvm->arch.apic_access_memslot_enabled)
2720 return;
2721
2722 kvm_vcpu_srcu_read_unlock(vcpu);
2723
2724 mutex_lock(&kvm->slots_lock);
2725
2726 if (kvm->arch.apic_access_memslot_enabled) {
2727 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2728 /*
2729 * Clear "enabled" after the memslot is deleted so that a
2730 * different vCPU doesn't get a false negative when checking
2731 * the flag out of slots_lock. No additional memory barrier is
2732	 * needed as modifying memslots requires waiting for other vCPUs to
2733 * drop SRCU (see above), and false positives are ok as the
2734 * flag is rechecked after acquiring slots_lock.
2735 */
2736 kvm->arch.apic_access_memslot_enabled = false;
2737
2738 /*
2739 * Mark the memslot as inhibited to prevent reallocating the
2740 * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
2741 */
2742 kvm->arch.apic_access_memslot_inhibited = true;
2743 }
2744
2745 mutex_unlock(&kvm->slots_lock);
2746
2747 kvm_vcpu_srcu_read_lock(vcpu);
2748}
2749
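/*
 * Emulate RESET/INIT of the local APIC. init_event distinguishes INIT
 * from RESET: RESET also restores the base MSR and sets the xAPIC ID from
 * the vCPU ID, both of which are architecturally preserved across INIT.
 */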
2750void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2751{
2752 struct kvm_lapic *apic = vcpu->arch.apic;
2753 u64 msr_val;
2754 int i;
2755
2756 kvm_x86_call(apicv_pre_state_restore)(vcpu);
2757
2758 if (!init_event) {
2759 msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2760 if (kvm_vcpu_is_reset_bsp(vcpu))
2761 msr_val |= MSR_IA32_APICBASE_BSP;
2762
2763 /*
2764	 * Use the inner helper to avoid an extra recalculation of the
2765 * optimized APIC map if some other task has dirtied the map.
2766 * The recalculation needed for this vCPU will be done after
2767 * all APIC state has been initialized (see below).
2768 */
2769 __kvm_apic_set_base(vcpu, msr_val);
2770 }
2771
2772 if (!apic)
2773 return;
2774
2775 /* Stop the timer in case it's a reset to an active apic */
2776 hrtimer_cancel(&apic->lapic_timer.timer);
2777
2778 /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2779 if (!init_event)
2780 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2781 kvm_apic_set_version(apic->vcpu);
2782
2783 for (i = 0; i < apic->nr_lvt_entries; i++)
2784 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2785 apic_update_lvtt(apic);
2786 if (kvm_vcpu_is_reset_bsp(vcpu) &&
2787 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2788 kvm_lapic_set_reg(apic, APIC_LVT0,
2789 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2790 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2791
2792 kvm_apic_set_dfr(apic, 0xffffffffU);
2793 apic_set_spiv(apic, 0xff);
2794 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2795 if (!apic_x2apic_mode(apic))
2796 kvm_apic_set_ldr(apic, 0);
2797 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2798 if (!apic_x2apic_mode(apic)) {
2799 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2800 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2801 } else {
2802 kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2803 }
2804 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2805 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2806 for (i = 0; i < 8; i++) {
2807 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2808 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2809 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2810 }
2811 kvm_apic_update_apicv(vcpu);
2812 update_divide_count(apic);
2813 atomic_set(&apic->lapic_timer.pending, 0);
2814
2815 vcpu->arch.pv_eoi.msr_val = 0;
2816 apic_update_ppr(apic);
2817 if (apic->apicv_active) {
2818 kvm_x86_call(apicv_post_state_restore)(vcpu);
2819 kvm_x86_call(hwapic_irr_update)(vcpu, -1);
2820 kvm_x86_call(hwapic_isr_update)(vcpu, -1);
2821 }
2822
2823 vcpu->arch.apic_arb_prio = 0;
2824 vcpu->arch.apic_attention = 0;
2825
2826 kvm_recalculate_apic_map(vcpu->kvm);
2827}
2828
2829/*
2830 *----------------------------------------------------------------------
2831 * timer interface
2832 *----------------------------------------------------------------------
2833 */
2834
2835static bool lapic_is_periodic(struct kvm_lapic *apic)
2836{
2837 return apic_lvtt_period(apic);
2838}
2839
2840int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2841{
2842 struct kvm_lapic *apic = vcpu->arch.apic;
2843
2844 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2845 return atomic_read(&apic->lapic_timer.pending);
2846
2847 return 0;
2848}
2849
2850int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2851{
2852 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2853 int vector, mode, trig_mode;
2854 int r;
2855
2856 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2857 vector = reg & APIC_VECTOR_MASK;
2858 mode = reg & APIC_MODE_MASK;
2859 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2860
2861 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2862 if (r && lvt_type == APIC_LVTPC &&
2863 guest_cpuid_is_intel_compatible(apic->vcpu))
2864 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2865 return r;
2866 }
2867 return 0;
2868}
2869
2870void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2871{
2872 struct kvm_lapic *apic = vcpu->arch.apic;
2873
2874 if (apic)
2875 kvm_apic_local_deliver(apic, APIC_LVT0);
2876}
2877
2878static const struct kvm_io_device_ops apic_mmio_ops = {
2879 .read = apic_mmio_read,
2880 .write = apic_mmio_write,
2881};
2882
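/*
 * hrtimer callback for the software timer; runs in hardirq context
 * (HRTIMER_MODE_ABS_HARD) and re-arms itself when in periodic mode.
 */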
2883static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2884{
2885 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2886 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2887
2888 apic_timer_expired(apic, true);
2889
2890 if (lapic_is_periodic(apic)) {
2891 advance_periodic_target_expiration(apic);
2892 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2893 return HRTIMER_RESTART;
2894 } else
2895 return HRTIMER_NORESTART;
2896}
2897
2898int kvm_create_lapic(struct kvm_vcpu *vcpu)
2899{
2900 struct kvm_lapic *apic;
2901
2902 ASSERT(vcpu != NULL);
2903
2904 if (!irqchip_in_kernel(vcpu->kvm)) {
2905 static_branch_inc(&kvm_has_noapic_vcpu);
2906 return 0;
2907 }
2908
2909 apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2910 if (!apic)
2911 goto nomem;
2912
2913 vcpu->arch.apic = apic;
2914
2915 if (kvm_x86_ops.alloc_apic_backing_page)
2916 apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
2917 else
2918 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2919 if (!apic->regs) {
2920		pr_err("Failed to allocate APIC register page for vCPU %u\n",
2921		       vcpu->vcpu_id);
2922 goto nomem_free_apic;
2923 }
2924 apic->vcpu = vcpu;
2925
2926 apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2927
2928 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2929 HRTIMER_MODE_ABS_HARD);
2930 apic->lapic_timer.timer.function = apic_timer_fn;
2931 if (lapic_timer_advance)
2932 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2933
2934 /*
2935 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2936 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2937 */
2938 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2939 static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2940 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2941
2942 /*
2943 * Defer evaluating inhibits until the vCPU is first run, as this vCPU
2944 * will not get notified of any changes until this vCPU is visible to
2945 * other vCPUs (marked online and added to the set of vCPUs).
2946 *
2947	 * Opportunistically mark APICv active as VMX in particular is highly
2948 * unlikely to have inhibits. Ignore the current per-VM APICv state so
2949 * that vCPU creation is guaranteed to run with a deterministic value,
2950 * the request will ensure the vCPU gets the correct state before VM-Entry.
2951 */
2952 if (enable_apicv) {
2953 apic->apicv_active = true;
2954 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2955 }
2956
2957 return 0;
2958nomem_free_apic:
2959 kfree(apic);
2960 vcpu->arch.apic = NULL;
2961nomem:
2962 return -ENOMEM;
2963}
2964
2965int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2966{
2967 struct kvm_lapic *apic = vcpu->arch.apic;
2968 u32 ppr;
2969
2970 if (!kvm_apic_present(vcpu))
2971 return -1;
2972
2973 __apic_update_ppr(apic, &ppr);
2974 return apic_has_interrupt_for_ppr(apic, ppr);
2975}
2976EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2977
2978int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2979{
2980 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2981
2982 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2983 return 1;
2984 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2985 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2986 return 1;
2987 return 0;
2988}
2989
2990void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2991{
2992 struct kvm_lapic *apic = vcpu->arch.apic;
2993
2994 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2995 kvm_apic_inject_pending_timer_irqs(apic);
2996 atomic_set(&apic->lapic_timer.pending, 0);
2997 }
2998}
2999
3000void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
3001{
3002 struct kvm_lapic *apic = vcpu->arch.apic;
3003 u32 ppr;
3004
3005 if (WARN_ON_ONCE(vector < 0 || !apic))
3006 return;
3007
3008 /*
3009 * We get here even with APIC virtualization enabled, if doing
3010 * nested virtualization and L1 runs with the "acknowledge interrupt
3011 * on exit" mode. Then we cannot inject the interrupt via RVI,
3012 * because the process would deliver it through the IDT.
3013 */
3014
3015 apic_clear_irr(vector, apic);
3016 if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
3017 /*
3018 * For auto-EOI interrupts, there might be another pending
3019 * interrupt above PPR, so check whether to raise another
3020 * KVM_REQ_EVENT.
3021 */
3022 apic_update_ppr(apic);
3023 } else {
3024 /*
3025 * For normal interrupts, PPR has been raised and there cannot
3026 * be a higher-priority pending interrupt---except if there was
3027 * a concurrent interrupt injection, but that would have
3028 * triggered KVM_REQ_EVENT already.
3029 */
3030 apic_set_isr(vector, apic);
3031 __apic_update_ppr(apic, &ppr);
3032 }
3033
3034}
3035EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
3036
3037static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
3038 struct kvm_lapic_state *s, bool set)
3039{
3040 if (apic_x2apic_mode(vcpu->arch.apic)) {
3041 u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic);
3042 u32 *id = (u32 *)(s->regs + APIC_ID);
3043 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
3044 u64 icr;
3045
3046 if (vcpu->kvm->arch.x2apic_format) {
3047 if (*id != x2apic_id)
3048 return -EINVAL;
3049 } else {
3050 /*
3051 * Ignore the userspace value when setting APIC state.
3052 * KVM's model is that the x2APIC ID is readonly, e.g.
3053 * KVM only supports delivering interrupts to KVM's
3054 * version of the x2APIC ID. However, for backwards
3055 * compatibility, don't reject attempts to set a
3056 * mismatched ID for userspace that hasn't opted into
3057 * x2apic_format.
3058 */
3059 if (set)
3060 *id = x2apic_id;
3061 else
3062 *id = x2apic_id << 24;
3063 }
3064
3065 /*
3066 * In x2APIC mode, the LDR is fixed and based on the id. And
3067 * if the ICR is _not_ split, ICR is internally a single 64-bit
3068 * register, but needs to be split to ICR+ICR2 in userspace for
3069 * backwards compatibility.
3070 */
3071 if (set)
3072 *ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
3073
3074 if (!kvm_x86_ops.x2apic_icr_is_split) {
3075 if (set) {
3076 icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
3077 (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
3078 __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
3079 } else {
3080 icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
3081 __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
3082 }
3083 }
3084 }
3085
3086 return 0;
3087}
3088
3089int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3090{
3091 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
3092
3093 /*
3094	 * Calculate the timer's current count from any remaining timer
3095	 * period and store it in the returned register set.
3096 */
3097 __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
3098 __apic_read(vcpu->arch.apic, APIC_TMCCT));
3099
3100 return kvm_apic_state_fixup(vcpu, s, false);
3101}
3102
3103int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
3104{
3105 struct kvm_lapic *apic = vcpu->arch.apic;
3106 int r;
3107
3108 kvm_x86_call(apicv_pre_state_restore)(vcpu);
3109
3110 /* set SPIV separately to get count of SW disabled APICs right */
3111 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
3112
3113 r = kvm_apic_state_fixup(vcpu, s, true);
3114 if (r) {
3115 kvm_recalculate_apic_map(vcpu->kvm);
3116 return r;
3117 }
3118 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3119
3120 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3121 kvm_recalculate_apic_map(vcpu->kvm);
3122 kvm_apic_set_version(vcpu);
3123
3124 apic_update_ppr(apic);
3125 cancel_apic_timer(apic);
3126 apic->lapic_timer.expired_tscdeadline = 0;
3127 apic_update_lvtt(apic);
3128 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3129 update_divide_count(apic);
3130 __start_apic_timer(apic, APIC_TMCCT);
3131 kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3132 kvm_apic_update_apicv(vcpu);
3133 if (apic->apicv_active) {
3134 kvm_x86_call(apicv_post_state_restore)(vcpu);
3135 kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
3136 kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
3137 }
3138 kvm_make_request(KVM_REQ_EVENT, vcpu);
3139 if (ioapic_in_kernel(vcpu->kvm))
3140 kvm_rtc_eoi_tracking_restore_one(vcpu);
3141
3142 vcpu->arch.apic_arb_prio = 0;
3143
3144 return 0;
3145}
3146
3147void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
3148{
3149 struct hrtimer *timer;
3150
3151 if (!lapic_in_kernel(vcpu) ||
3152 kvm_can_post_timer_interrupt(vcpu))
3153 return;
3154
3155 timer = &vcpu->arch.apic->lapic_timer.timer;
3156 if (hrtimer_cancel(timer))
3157 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
3158}
3159
3160/*
3161 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
3162 *
3163 * Detect whether guest triggered PV EOI since the
3164	 * last entry. If yes, set EOI on the guest's behalf.
3165 * Clear PV EOI in guest memory in any case.
3166 */
3167static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
3168 struct kvm_lapic *apic)
3169{
3170 int vector;
3171 /*
3172 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
3173 * and KVM_PV_EOI_ENABLED in guest memory as follows:
3174 *
3175 * KVM_APIC_PV_EOI_PENDING is unset:
3176 * -> host disabled PV EOI.
3177 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
3178 * -> host enabled PV EOI, guest did not execute EOI yet.
3179 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
3180 * -> host enabled PV EOI, guest executed EOI.
3181 */
3182 BUG_ON(!pv_eoi_enabled(vcpu));
3183
3184 if (pv_eoi_test_and_clr_pending(vcpu))
3185 return;
3186 vector = apic_set_eoi(apic);
3187 trace_kvm_pv_eoi(apic, vector);
3188}
3189
3190void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
3191{
3192 u32 data;
3193
3194 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
3195 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3196
3197 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3198 return;
3199
3200 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3201 sizeof(u32)))
3202 return;
3203
3204 apic_set_tpr(vcpu->arch.apic, data & 0xff);
3205}
3206
3207/*
3208 * apic_sync_pv_eoi_to_guest - called before vmentry
3209 *
3210 * Detect whether it's safe to enable PV EOI and
3211 * if yes do so.
3212 */
3213static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
3214 struct kvm_lapic *apic)
3215{
3216 if (!pv_eoi_enabled(vcpu) ||
3217 /* IRR set or many bits in ISR: could be nested. */
3218 apic->irr_pending ||
3219 /* Cache not set: could be safe but we don't bother. */
3220 apic->highest_isr_cache == -1 ||
3221 /* Need EOI to update ioapic. */
3222 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3223 /*
3224 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
3225 * so we need not do anything here.
3226 */
3227 return;
3228 }
3229
3230 pv_eoi_set_pending(apic->vcpu);
3231}
3232
3233void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
3234{
3235 u32 data, tpr;
3236 int max_irr, max_isr;
3237 struct kvm_lapic *apic = vcpu->arch.apic;
3238
3239 apic_sync_pv_eoi_to_guest(vcpu, apic);
3240
3241 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3242 return;
3243
3244 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3245 max_irr = apic_find_highest_irr(apic);
3246 if (max_irr < 0)
3247 max_irr = 0;
3248 max_isr = apic_find_highest_isr(apic);
3249 if (max_isr < 0)
3250 max_isr = 0;
3251 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
3252
3253 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3254 sizeof(u32));
3255}
3256
3257int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
3258{
3259 if (vapic_addr) {
3260 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
3261 &vcpu->arch.apic->vapic_cache,
3262 vapic_addr, sizeof(u32)))
3263 return -EINVAL;
3264 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3265 } else {
3266 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3267 }
3268
3269 vcpu->arch.apic->vapic_addr = vapic_addr;
3270 return 0;
3271}
3272
3273static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
3274{
3275 u32 low;
3276
3277 if (reg == APIC_ICR) {
3278 *data = kvm_x2apic_icr_read(apic);
3279 return 0;
3280 }
3281
3282 if (kvm_lapic_reg_read(apic, reg, 4, &low))
3283 return 1;
3284
3285 *data = low;
3286
3287 return 0;
3288}
3289
3290static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
3291{
3292 /*
3293 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
3294 * can be written as such, all other registers remain accessible only
3295 * through 32-bit reads/writes.
3296 */
3297 if (reg == APIC_ICR)
3298 return kvm_x2apic_icr_write(apic, data);
3299
3300 /* Bits 63:32 are reserved in all other registers. */
3301 if (data >> 32)
3302 return 1;
3303
3304 return kvm_lapic_reg_write(apic, reg, (u32)data);
3305}
3306
3307int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
3308{
3309 struct kvm_lapic *apic = vcpu->arch.apic;
3310 u32 reg = (msr - APIC_BASE_MSR) << 4;
3311
3312 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3313 return 1;
3314
3315 return kvm_lapic_msr_write(apic, reg, data);
3316}
3317
3318int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
3319{
3320 struct kvm_lapic *apic = vcpu->arch.apic;
3321 u32 reg = (msr - APIC_BASE_MSR) << 4;
3322
3323 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3324 return 1;
3325
3326 return kvm_lapic_msr_read(apic, reg, data);
3327}
3328
3329int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
3330{
3331 if (!lapic_in_kernel(vcpu))
3332 return 1;
3333
3334 return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3335}
3336
3337int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
3338{
3339 if (!lapic_in_kernel(vcpu))
3340 return 1;
3341
3342 return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3343}
3344
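/*
 * Handle a write to the PV EOI enable MSR: bit 0 (KVM_MSR_ENABLED) turns
 * the feature on, the remaining bits hold the 4-byte aligned GPA of the
 * guest's PV EOI flag.
 */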
3345int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
3346{
3347 u64 addr = data & ~KVM_MSR_ENABLED;
3348 struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
3349 unsigned long new_len;
3350 int ret;
3351
3352 if (!IS_ALIGNED(addr, 4))
3353 return 1;
3354
3355 if (data & KVM_MSR_ENABLED) {
3356 if (addr == ghc->gpa && len <= ghc->len)
3357 new_len = ghc->len;
3358 else
3359 new_len = len;
3360
3361 ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
3362 if (ret)
3363 return ret;
3364 }
3365
3366 vcpu->arch.pv_eoi.msr_val = data;
3367
3368 return 0;
3369}
3370
3371int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
3372{
3373 struct kvm_lapic *apic = vcpu->arch.apic;
3374 u8 sipi_vector;
3375 int r;
3376
3377 if (!kvm_apic_has_pending_init_or_sipi(vcpu))
3378 return 0;
3379
3380 if (is_guest_mode(vcpu)) {
3381 r = kvm_check_nested_events(vcpu);
3382 if (r < 0)
3383 return r == -EBUSY ? 0 : r;
3384 /*
3385 * Continue processing INIT/SIPI even if a nested VM-Exit
3386 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
3387 * are blocked as a result of transitioning to VMX root mode.
3388 */
3389 }
3390
3391 /*
3392	 * INITs are blocked while the CPU is in specific states (SMM, VMX root
3393 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
3394 * wait-for-SIPI (WFS).
3395 */
3396 if (!kvm_apic_init_sipi_allowed(vcpu)) {
3397 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
3398 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3399 return 0;
3400 }
3401
3402 if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3403 kvm_vcpu_reset(vcpu, true);
3404 if (kvm_vcpu_is_bsp(apic->vcpu))
3405 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3406 else
3407 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3408 }
3409 if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3410 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
3411 /* evaluate pending_events before reading the vector */
3412 smp_rmb();
3413 sipi_vector = apic->sipi_vector;
3414 kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
3415 sipi_vector);
3416 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3417 }
3418 }
3419 return 0;
3420}
3421
3422void kvm_lapic_exit(void)
3423{
3424 static_key_deferred_flush(&apic_hw_disabled);
3425 WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
3426 static_key_deferred_flush(&apic_sw_disabled);
3427 WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
3428}
1// SPDX-License-Identifier: GPL-2.0-only
2
3/*
4 * Local APIC virtualization
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright (C) 2007 Novell
8 * Copyright (C) 2007 Intel
9 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Dor Laor <dor.laor@qumranet.com>
13 * Gregory Haskins <ghaskins@novell.com>
14 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
15 *
16 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
17 */
18
19#include <linux/kvm_host.h>
20#include <linux/kvm.h>
21#include <linux/mm.h>
22#include <linux/highmem.h>
23#include <linux/smp.h>
24#include <linux/hrtimer.h>
25#include <linux/io.h>
26#include <linux/export.h>
27#include <linux/math64.h>
28#include <linux/slab.h>
29#include <asm/processor.h>
30#include <asm/mce.h>
31#include <asm/msr.h>
32#include <asm/page.h>
33#include <asm/current.h>
34#include <asm/apicdef.h>
35#include <asm/delay.h>
36#include <linux/atomic.h>
37#include <linux/jump_label.h>
38#include "kvm_cache_regs.h"
39#include "irq.h"
40#include "ioapic.h"
41#include "trace.h"
42#include "x86.h"
43#include "cpuid.h"
44#include "hyperv.h"
45#include "smm.h"
46
47#ifndef CONFIG_X86_64
48#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
49#else
50#define mod_64(x, y) ((x) % (y))
51#endif
52
53#define PRId64 "d"
54#define PRIx64 "llx"
55#define PRIu64 "u"
56#define PRIo64 "o"
57
58/* 14 is the version for Xeon and Pentium 8.4.8*/
59#define APIC_VERSION 0x14UL
60#define LAPIC_MMIO_LENGTH (1 << 12)
61/* followed define is not in apicdef.h */
62#define MAX_APIC_VECTOR 256
63#define APIC_VECTORS_PER_REG 32
64
65static bool lapic_timer_advance_dynamic __read_mostly;
66#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
67#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
68#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
69#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
70/* step-by-step approximation to mitigate fluctuation */
71#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
72static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
73static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
74
75static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
76{
77 *((u32 *) (regs + reg_off)) = val;
78}
79
80static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
81{
82 __kvm_lapic_set_reg(apic->regs, reg_off, val);
83}
84
85static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
86{
87 BUILD_BUG_ON(reg != APIC_ICR);
88 return *((u64 *) (regs + reg));
89}
90
91static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
92{
93 return __kvm_lapic_get_reg64(apic->regs, reg);
94}
95
96static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
97{
98 BUILD_BUG_ON(reg != APIC_ICR);
99 *((u64 *) (regs + reg)) = val;
100}
101
102static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
103 int reg, u64 val)
104{
105 __kvm_lapic_set_reg64(apic->regs, reg, val);
106}
107
108static inline int apic_test_vector(int vec, void *bitmap)
109{
110 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
111}
112
113bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
114{
115 struct kvm_lapic *apic = vcpu->arch.apic;
116
117 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
118 apic_test_vector(vector, apic->regs + APIC_IRR);
119}
120
121static inline int __apic_test_and_set_vector(int vec, void *bitmap)
122{
123 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
124}
125
126static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
127{
128 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
129}
130
131__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
132__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
133
134static inline int apic_enabled(struct kvm_lapic *apic)
135{
136 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
137}
138
139#define LVT_MASK \
140 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
141
142#define LINT_MASK \
143 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
144 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
145
146static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
147{
148 return apic->vcpu->vcpu_id;
149}
150
151static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
152{
153 return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
154 (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
155}
156
157bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
158{
159 return kvm_x86_ops.set_hv_timer
160 && !(kvm_mwait_in_guest(vcpu->kvm) ||
161 kvm_can_post_timer_interrupt(vcpu));
162}
163
164static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
165{
166 return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
167}
168
169static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
170 u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
171 switch (map->mode) {
172 case KVM_APIC_MODE_X2APIC: {
173 u32 offset = (dest_id >> 16) * 16;
174 u32 max_apic_id = map->max_apic_id;
175
176 if (offset <= max_apic_id) {
177 u8 cluster_size = min(max_apic_id - offset + 1, 16U);
178
179 offset = array_index_nospec(offset, map->max_apic_id + 1);
180 *cluster = &map->phys_map[offset];
181 *mask = dest_id & (0xffff >> (16 - cluster_size));
182 } else {
183 *mask = 0;
184 }
185
186 return true;
187 }
188 case KVM_APIC_MODE_XAPIC_FLAT:
189 *cluster = map->xapic_flat_map;
190 *mask = dest_id & 0xff;
191 return true;
192 case KVM_APIC_MODE_XAPIC_CLUSTER:
193 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
194 *mask = dest_id & 0xf;
195 return true;
196 default:
197 /* Not optimized. */
198 return false;
199 }
200}
201
202static void kvm_apic_map_free(struct rcu_head *rcu)
203{
204 struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
205
206 kvfree(map);
207}
208
209/*
210 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
211 *
212 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
213 * apic_map_lock_held.
214 */
215enum {
216 CLEAN,
217 UPDATE_IN_PROGRESS,
218 DIRTY
219};
220
221void kvm_recalculate_apic_map(struct kvm *kvm)
222{
223 struct kvm_apic_map *new, *old = NULL;
224 struct kvm_vcpu *vcpu;
225 unsigned long i;
226 u32 max_id = 255; /* enough space for any xAPIC ID */
227
228 /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
229 if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
230 return;
231
232 WARN_ONCE(!irqchip_in_kernel(kvm),
233 "Dirty APIC map without an in-kernel local APIC");
234
235 mutex_lock(&kvm->arch.apic_map_lock);
236 /*
237 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
238 * (if clean) or the APIC registers (if dirty).
239 */
240 if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
241 DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
242 /* Someone else has updated the map. */
243 mutex_unlock(&kvm->arch.apic_map_lock);
244 return;
245 }
246
247 kvm_for_each_vcpu(i, vcpu, kvm)
248 if (kvm_apic_present(vcpu))
249 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
250
251 new = kvzalloc(sizeof(struct kvm_apic_map) +
252 sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
253 GFP_KERNEL_ACCOUNT);
254
255 if (!new)
256 goto out;
257
258 new->max_apic_id = max_id;
259
260 kvm_for_each_vcpu(i, vcpu, kvm) {
261 struct kvm_lapic *apic = vcpu->arch.apic;
262 struct kvm_lapic **cluster;
263 u16 mask;
264 u32 ldr;
265 u8 xapic_id;
266 u32 x2apic_id;
267
268 if (!kvm_apic_present(vcpu))
269 continue;
270
271 xapic_id = kvm_xapic_id(apic);
272 x2apic_id = kvm_x2apic_id(apic);
273
274 /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
275 if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
276 x2apic_id <= new->max_apic_id)
277 new->phys_map[x2apic_id] = apic;
278 /*
279 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
280 * prevent them from masking VCPUs with APIC ID <= 0xff.
281 */
282 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
283 new->phys_map[xapic_id] = apic;
284
285 if (!kvm_apic_sw_enabled(apic))
286 continue;
287
288 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
289
290 if (apic_x2apic_mode(apic)) {
291 new->mode |= KVM_APIC_MODE_X2APIC;
292 } else if (ldr) {
293 ldr = GET_APIC_LOGICAL_ID(ldr);
294 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
295 new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
296 else
297 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
298 }
299
300 if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
301 continue;
302
303 if (mask)
304 cluster[ffs(mask) - 1] = apic;
305 }
306out:
307 old = rcu_dereference_protected(kvm->arch.apic_map,
308 lockdep_is_held(&kvm->arch.apic_map_lock));
309 rcu_assign_pointer(kvm->arch.apic_map, new);
310 /*
311 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
312 * If another update has come in, leave it DIRTY.
313 */
314 atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
315 UPDATE_IN_PROGRESS, CLEAN);
316 mutex_unlock(&kvm->arch.apic_map_lock);
317
318 if (old)
319 call_rcu(&old->rcu, kvm_apic_map_free);
320
321 kvm_make_scan_ioapic_request(kvm);
322}
323
324static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
325{
326 bool enabled = val & APIC_SPIV_APIC_ENABLED;
327
328 kvm_lapic_set_reg(apic, APIC_SPIV, val);
329
330 if (enabled != apic->sw_enabled) {
331 apic->sw_enabled = enabled;
332 if (enabled)
333 static_branch_slow_dec_deferred(&apic_sw_disabled);
334 else
335 static_branch_inc(&apic_sw_disabled.key);
336
337 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
338 }
339
340 /* Check if there are APF page ready requests pending */
341 if (enabled)
342 kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
343}
344
345static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
346{
347 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
348 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
349}
350
351static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
352{
353 kvm_lapic_set_reg(apic, APIC_LDR, id);
354 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
355}
356
357static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
358{
359 kvm_lapic_set_reg(apic, APIC_DFR, val);
360 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
361}
362
363static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
364{
365 return ((id >> 4) << 16) | (1 << (id & 0xf));
366}
367
368static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
369{
370 u32 ldr = kvm_apic_calc_x2apic_ldr(id);
371
372 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
373
374 kvm_lapic_set_reg(apic, APIC_ID, id);
375 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
376 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
377}
378
379static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
380{
381 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
382}
383
384static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
385{
386 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
387}
388
389static inline int apic_lvtt_period(struct kvm_lapic *apic)
390{
391 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
392}
393
394static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
395{
396 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
397}
398
399static inline int apic_lvt_nmi_mode(u32 lvt_val)
400{
401 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
402}
403
404static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
405{
406 return apic->nr_lvt_entries > lvt_index;
407}
408
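/*
 * The CMCI LVT entry only exists if the vCPU's MCE capabilities advertise
 * CMCI (MCG_CMCI_P); without it, the APIC exposes one fewer LVT entry.
 */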
409static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
410{
411 return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
412}
413
414void kvm_apic_set_version(struct kvm_vcpu *vcpu)
415{
416 struct kvm_lapic *apic = vcpu->arch.apic;
417 u32 v = 0;
418
419 if (!lapic_in_kernel(vcpu))
420 return;
421
422 v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
423
/*
 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
 * implementation), which doesn't have an EOI register.  Some buggy
 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast in
 * the LAPIC without checking the IOAPIC version first, so
 * level-triggered interrupts never get EOIed in the IOAPIC.
 */
431 if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
432 !ioapic_in_kernel(vcpu->kvm))
433 v |= APIC_LVR_DIRECTED_EOI;
434 kvm_lapic_set_reg(apic, APIC_LVR, v);
435}
436
437void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
438{
439 int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
440 struct kvm_lapic *apic = vcpu->arch.apic;
441 int i;
442
443 if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
444 return;
445
446 /* Initialize/mask any "new" LVT entries. */
447 for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
448 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
449
450 apic->nr_lvt_entries = nr_lvt_entries;
451
452 /* The number of LVT entries is reflected in the version register. */
453 kvm_apic_set_version(vcpu);
454}
455
456static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
457 [LVT_TIMER] = LVT_MASK, /* timer mode mask added at runtime */
458 [LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
459 [LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
460 [LVT_LINT0] = LINT_MASK,
461 [LVT_LINT1] = LINT_MASK,
462 [LVT_ERROR] = LVT_MASK,
463 [LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
464};
465
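/*
 * The 256 vectors live in eight 32-bit registers spaced 0x10 apart (see
 * REG_POS).  Scan from the highest register down; the first non-zero
 * register yields the highest set vector as __fls(*reg) + vec, or -1 if
 * the whole bitmap is empty.
 */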
466static int find_highest_vector(void *bitmap)
467{
468 int vec;
469 u32 *reg;
470
471 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
472 vec >= 0; vec -= APIC_VECTORS_PER_REG) {
473 reg = bitmap + REG_POS(vec);
474 if (*reg)
475 return __fls(*reg) + vec;
476 }
477
478 return -1;
479}
480
481static u8 count_vectors(void *bitmap)
482{
483 int vec;
484 u32 *reg;
485 u8 count = 0;
486
487 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
488 reg = bitmap + REG_POS(vec);
489 count += hweight32(*reg);
490 }
491
492 return count;
493}
494
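/*
 * Merge the Posted-Interrupt Request bitmap into the vIRR, 32 vectors at
 * a time.  Each PIR chunk is claimed atomically with xchg() so a vector
 * posted concurrently by another CPU is never lost, only deferred to the
 * next merge.  The return value says whether the highest pending IRR
 * vector was freshly transferred from the PIR, which callers can use to
 * decide whether further event-injection processing is needed.
 */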
495bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
496{
497 u32 i, vec;
498 u32 pir_val, irr_val, prev_irr_val;
499 int max_updated_irr;
500
501 max_updated_irr = -1;
502 *max_irr = -1;
503
504 for (i = vec = 0; i <= 7; i++, vec += 32) {
505 pir_val = READ_ONCE(pir[i]);
506 irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
507 if (pir_val) {
508 prev_irr_val = irr_val;
509 irr_val |= xchg(&pir[i], 0);
510 *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
511 if (prev_irr_val != irr_val) {
512 max_updated_irr =
513 __fls(irr_val ^ prev_irr_val) + vec;
514 }
515 }
516 if (irr_val)
517 *max_irr = __fls(irr_val) + vec;
518 }
519
520 return ((max_updated_irr != -1) &&
521 (max_updated_irr == *max_irr));
522}
523EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
524
525bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
526{
527 struct kvm_lapic *apic = vcpu->arch.apic;
528
529 return __kvm_apic_update_irr(pir, apic->regs, max_irr);
530}
531EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
532
533static inline int apic_search_irr(struct kvm_lapic *apic)
534{
535 return find_highest_vector(apic->regs + APIC_IRR);
536}
537
538static inline int apic_find_highest_irr(struct kvm_lapic *apic)
539{
540 int result;
541
/*
 * Note that irr_pending is just a hint.  It will always be
 * true with virtual interrupt delivery enabled.
 */
546 if (!apic->irr_pending)
547 return -1;
548
549 result = apic_search_irr(apic);
550 ASSERT(result == -1 || result >= 16);
551
552 return result;
553}
554
555static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
556{
557 if (unlikely(apic->apicv_active)) {
558 /* need to update RVI */
559 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
560 static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
561 apic_find_highest_irr(apic));
562 } else {
563 apic->irr_pending = false;
564 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
565 if (apic_search_irr(apic) != -1)
566 apic->irr_pending = true;
567 }
568}
569
570void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
571{
572 apic_clear_irr(vec, vcpu->arch.apic);
573}
574EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
575
576static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
577{
578 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
579 return;
580
581 /*
582 * With APIC virtualization enabled, all caching is disabled
583 * because the processor can modify ISR under the hood. Instead
584 * just set SVI.
585 */
if (unlikely(apic->apicv_active)) {
static_call_cond(kvm_x86_hwapic_isr_update)(vec);
} else {
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/*
 * ISR (in service register) bit is set when injecting an interrupt.
 * The highest vector is injected. Thus the latest bit set matches
 * the highest bit in ISR.
 */
apic->highest_isr_cache = vec;
}
598}
599
600static inline int apic_find_highest_isr(struct kvm_lapic *apic)
601{
602 int result;
603
604 /*
605 * Note that isr_count is always 1, and highest_isr_cache
606 * is always -1, with APIC virtualization enabled.
607 */
608 if (!apic->isr_count)
609 return -1;
610 if (likely(apic->highest_isr_cache != -1))
611 return apic->highest_isr_cache;
612
613 result = find_highest_vector(apic->regs + APIC_ISR);
614 ASSERT(result == -1 || result >= 16);
615
616 return result;
617}
618
619static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
620{
621 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
622 return;
623
624 /*
625 * We do get here for APIC virtualization enabled if the guest
626 * uses the Hyper-V APIC enlightenment. In this case we may need
627 * to trigger a new interrupt delivery by writing the SVI field;
628 * on the other hand isr_count and highest_isr_cache are unused
629 * and must be left alone.
630 */
if (unlikely(apic->apicv_active)) {
static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
} else {
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
apic->highest_isr_cache = -1;
}
638}
639
640int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
641{
/*
 * This may race with the setting of irr in __apic_accept_irq() and the
 * value returned may be stale, but kvm_vcpu_kick() in __apic_accept_irq
 * will cause a vmexit immediately and the value will be recalculated on
 * the next vmentry.
 */
647 return apic_find_highest_irr(vcpu->arch.apic);
648}
649EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
650
651static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
652 int vector, int level, int trig_mode,
653 struct dest_map *dest_map);
654
655int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
656 struct dest_map *dest_map)
657{
658 struct kvm_lapic *apic = vcpu->arch.apic;
659
660 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
661 irq->level, irq->trig_mode, dest_map);
662}
663
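/*
 * Deliver one chunk of a PV IPI bitmap: bit i targets the vCPU whose
 * APIC ID is (min + i).  IDs beyond the map's max_apic_id are ignored,
 * and the return value counts the interrupts that were actually
 * injected.
 */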
664static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
665 struct kvm_lapic_irq *irq, u32 min)
666{
667 int i, count = 0;
668 struct kvm_vcpu *vcpu;
669
670 if (min > map->max_apic_id)
671 return 0;
672
673 for_each_set_bit(i, ipi_bitmap,
674 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
675 if (map->phys_map[min + i]) {
676 vcpu = map->phys_map[min + i]->vcpu;
677 count += kvm_apic_set_irq(vcpu, irq, NULL);
678 }
679 }
680
681 return count;
682}
683
684int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
685 unsigned long ipi_bitmap_high, u32 min,
686 unsigned long icr, int op_64_bit)
687{
688 struct kvm_apic_map *map;
689 struct kvm_lapic_irq irq = {0};
690 int cluster_size = op_64_bit ? 64 : 32;
691 int count;
692
693 if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
694 return -KVM_EINVAL;
695
696 irq.vector = icr & APIC_VECTOR_MASK;
697 irq.delivery_mode = icr & APIC_MODE_MASK;
698 irq.level = (icr & APIC_INT_ASSERT) != 0;
699 irq.trig_mode = icr & APIC_INT_LEVELTRIG;
700
701 rcu_read_lock();
702 map = rcu_dereference(kvm->arch.apic_map);
703
704 count = -EOPNOTSUPP;
705 if (likely(map)) {
706 count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
707 min += cluster_size;
708 count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
709 }
710
711 rcu_read_unlock();
712 return count;
713}
714
715static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
716{
718 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
719 sizeof(val));
720}
721
722static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
723{
725 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
726 sizeof(*val));
727}
728
729static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
730{
731 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
732}
733
734static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
735{
736 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
737 return;
738
739 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
740}
741
742static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
743{
744 u8 val;
745
746 if (pv_eoi_get_user(vcpu, &val) < 0)
747 return false;
748
749 val &= KVM_PV_EOI_ENABLED;
750
751 if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
752 return false;
753
/*
 * Clear the pending bit in any case: it will be set again on vmentry.
 * While this might not be ideal from a performance point of view,
 * it makes sure PV EOI is only enabled when we know it's safe.
 */
759 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
760
761 return val;
762}
763
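/*
 * An interrupt is only deliverable if its priority class (vector[7:4])
 * is strictly above the PPR; e.g. vector 0x61 is blocked while the PPR
 * is 0x60.  When posted interrupts are in use, sync the PIR into the
 * IRR first so the most recent highest vector is evaluated.
 */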
764static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
765{
int highest_irr;

767 if (kvm_x86_ops.sync_pir_to_irr)
768 highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
769 else
770 highest_irr = apic_find_highest_irr(apic);
771 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
772 return -1;
773 return highest_irr;
774}
775
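/*
 * Recompute the PPR as the higher of the TPR and the priority class of
 * the highest in-service vector (SDM: PPR[7:4] = max(TPR[7:4],
 * ISRV[7:4])).  E.g. TPR = 0x30 with ISRV = 0x51 yields PPR = 0x50.
 * Returns true if the PPR dropped, i.e. if a previously blocked
 * interrupt may have become deliverable.
 */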
776static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
777{
778 u32 tpr, isrv, ppr, old_ppr;
779 int isr;
780
781 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
782 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
783 isr = apic_find_highest_isr(apic);
784 isrv = (isr != -1) ? isr : 0;
785
786 if ((tpr & 0xf0) >= (isrv & 0xf0))
787 ppr = tpr & 0xff;
788 else
789 ppr = isrv & 0xf0;
790
791 *new_ppr = ppr;
792 if (old_ppr != ppr)
793 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
794
795 return ppr < old_ppr;
796}
797
798static void apic_update_ppr(struct kvm_lapic *apic)
799{
800 u32 ppr;
801
802 if (__apic_update_ppr(apic, &ppr) &&
803 apic_has_interrupt_for_ppr(apic, ppr) != -1)
804 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
805}
806
807void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
808{
809 apic_update_ppr(vcpu->arch.apic);
810}
811EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
812
813static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
814{
815 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
816 apic_update_ppr(apic);
817}
818
819static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
820{
821 return mda == (apic_x2apic_mode(apic) ?
822 X2APIC_BROADCAST : APIC_BROADCAST);
823}
824
825static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
826{
827 if (kvm_apic_broadcast(apic, mda))
828 return true;
829
830 /*
831 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
832 * were in x2APIC mode if the target APIC ID can't be encoded as an
833 * xAPIC ID. This allows unique addressing of hotplugged vCPUs (which
834 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
835 * mode. Match the x2APIC ID if and only if the target APIC ID can't
836 * be encoded in xAPIC to avoid spurious matches against a vCPU that
837 * changed its (addressable) xAPIC ID (which is writable).
838 */
839 if (apic_x2apic_mode(apic) || mda > 0xff)
840 return mda == kvm_x2apic_id(apic);
841
842 return mda == kvm_xapic_id(apic);
843}
844
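/*
 * Logical destination match.  In x2APIC mode the MDA is <cluster:16,
 * bitmask:16>: the cluster IDs must be equal and the bitmasks must
 * intersect.  In xAPIC mode the 8-bit logical ID is matched per the DFR:
 * flat mode is a plain bitmask intersection (e.g. LDR 0x02 matches MDA
 * 0x0a), cluster mode compares the upper 4 bits and intersects the
 * lower 4.
 */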
845static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
846{
847 u32 logical_id;
848
849 if (kvm_apic_broadcast(apic, mda))
850 return true;
851
852 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
853
854 if (apic_x2apic_mode(apic))
855 return ((logical_id >> 16) == (mda >> 16))
856 && (logical_id & mda & 0xffff) != 0;
857
858 logical_id = GET_APIC_LOGICAL_ID(logical_id);
859
860 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
861 case APIC_DFR_FLAT:
862 return (logical_id & mda) != 0;
863 case APIC_DFR_CLUSTER:
864 return ((logical_id >> 4) == (mda >> 4))
865 && (logical_id & mda & 0xf) != 0;
866 default:
867 return false;
868 }
869}
870
/*
 * The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined for an x2APIC ID > 0xff to
 *    LAPICs in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - In-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
887static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
888 struct kvm_lapic *source, struct kvm_lapic *target)
889{
890 bool ipi = source != NULL;
891
892 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
893 !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
894 return X2APIC_BROADCAST;
895
896 return dest_id;
897}
898
899bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
900 int shorthand, unsigned int dest, int dest_mode)
901{
902 struct kvm_lapic *target = vcpu->arch.apic;
903 u32 mda = kvm_apic_mda(vcpu, dest, source, target);
904
905 ASSERT(target);
906 switch (shorthand) {
907 case APIC_DEST_NOSHORT:
908 if (dest_mode == APIC_DEST_PHYSICAL)
909 return kvm_apic_match_physical_addr(target, mda);
910 else
911 return kvm_apic_match_logical_addr(target, mda);
912 case APIC_DEST_SELF:
913 return target == source;
914 case APIC_DEST_ALLINC:
915 return true;
916 case APIC_DEST_ALLBUT:
917 return target != source;
918 default:
919 return false;
920 }
921}
922EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
923
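/*
 * Vector hashing: deterministically pick one of dest_vcpus candidates by
 * walking to the (vector % dest_vcpus)-th set bit of *bitmap, counting
 * from zero.  E.g. vector 34 with 3 candidates selects the second set
 * bit (34 % 3 == 1).
 */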
924int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
925 const unsigned long *bitmap, u32 bitmap_size)
926{
927 u32 mod;
928 int i, idx = -1;
929
930 mod = vector % dest_vcpus;
931
932 for (i = 0; i <= mod; i++) {
933 idx = find_next_bit(bitmap, bitmap_size, idx + 1);
934 BUG_ON(idx == bitmap_size);
935 }
936
937 return idx;
938}
939
940static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
941{
942 if (!kvm->arch.disabled_lapic_found) {
943 kvm->arch.disabled_lapic_found = true;
944 printk(KERN_INFO
945 "Disabled LAPIC found during irq injection\n");
946 }
947}
948
949static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
950 struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
951{
952 if (kvm->arch.x2apic_broadcast_quirk_disabled) {
953 if ((irq->dest_id == APIC_BROADCAST &&
954 map->mode != KVM_APIC_MODE_X2APIC))
955 return true;
956 if (irq->dest_id == X2APIC_BROADCAST)
957 return true;
958 } else {
959 bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
960 if (irq->dest_id == (x2apic_ipi ?
961 X2APIC_BROADCAST : APIC_BROADCAST))
962 return true;
963 }
964
965 return false;
966}
967
/*
 * Return true if the interrupt can be handled by using *bitmap as an index
 * mask for valid destinations in the *dst array.
 * Return false if kvm_apic_map_get_dest_lapic() did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would
 * be zero and *dst undefined.
 */
975static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
976 struct kvm_lapic **src, struct kvm_lapic_irq *irq,
977 struct kvm_apic_map *map, struct kvm_lapic ***dst,
978 unsigned long *bitmap)
979{
980 int i, lowest;
981
982 if (irq->shorthand == APIC_DEST_SELF && src) {
983 *dst = src;
984 *bitmap = 1;
985 return true;
} else if (irq->shorthand) {
return false;
}
988
989 if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
990 return false;
991
992 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
993 if (irq->dest_id > map->max_apic_id) {
994 *bitmap = 0;
995 } else {
996 u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
997 *dst = &map->phys_map[dest_id];
998 *bitmap = 1;
999 }
1000 return true;
1001 }
1002
1003 *bitmap = 0;
1004 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
1005 (u16 *)bitmap))
1006 return false;
1007
1008 if (!kvm_lowest_prio_delivery(irq))
1009 return true;
1010
1011 if (!kvm_vector_hashing_enabled()) {
1012 lowest = -1;
1013 for_each_set_bit(i, bitmap, 16) {
1014 if (!(*dst)[i])
1015 continue;
1016 if (lowest < 0)
1017 lowest = i;
1018 else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
1019 (*dst)[lowest]->vcpu) < 0)
1020 lowest = i;
1021 }
1022 } else {
1023 if (!*bitmap)
1024 return true;
1025
1026 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
1027 bitmap, 16);
1028
1029 if (!(*dst)[lowest]) {
1030 kvm_apic_disabled_lapic_found(kvm);
1031 *bitmap = 0;
1032 return true;
1033 }
1034 }
1035
1036 *bitmap = (lowest >= 0) ? 1 << lowest : 0;
1037
1038 return true;
1039}
1040
1041bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
1042 struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
1043{
1044 struct kvm_apic_map *map;
1045 unsigned long bitmap;
1046 struct kvm_lapic **dst = NULL;
1047 int i;
1048 bool ret;
1049
1050 *r = -1;
1051
1052 if (irq->shorthand == APIC_DEST_SELF) {
1053 if (KVM_BUG_ON(!src, kvm)) {
1054 *r = 0;
1055 return true;
1056 }
1057 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
1058 return true;
1059 }
1060
1061 rcu_read_lock();
1062 map = rcu_dereference(kvm->arch.apic_map);
1063
1064 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1065 if (ret) {
1066 *r = 0;
1067 for_each_set_bit(i, &bitmap, 16) {
1068 if (!dst[i])
1069 continue;
1070 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1071 }
1072 }
1073
1074 rcu_read_unlock();
1075 return ret;
1076}
1077
1078/*
1079 * This routine tries to handle interrupts in posted mode, here is how
1080 * it deals with different cases:
1081 * - For single-destination interrupts, handle it in posted mode
1082 * - Else if vector hashing is enabled and it is a lowest-priority
1083 * interrupt, handle it in posted mode and use the following mechanism
1084 * to find the destination vCPU.
1085 * 1. For lowest-priority interrupts, store all the possible
1086 * destination vCPUs in an array.
1087 * 2. Use "guest vector % max number of destination vCPUs" to find
1088 * the right destination vCPU in the array for the lowest-priority
1089 * interrupt.
1090 * - Otherwise, use remapped mode to inject the interrupt.
1091 */
1092bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
1093 struct kvm_vcpu **dest_vcpu)
1094{
1095 struct kvm_apic_map *map;
1096 unsigned long bitmap;
1097 struct kvm_lapic **dst = NULL;
1098 bool ret = false;
1099
1100 if (irq->shorthand)
1101 return false;
1102
1103 rcu_read_lock();
1104 map = rcu_dereference(kvm->arch.apic_map);
1105
1106 if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1107 hweight16(bitmap) == 1) {
1108 unsigned long i = find_first_bit(&bitmap, 16);
1109
1110 if (dst[i]) {
1111 *dest_vcpu = dst[i]->vcpu;
1112 ret = true;
1113 }
1114 }
1115
1116 rcu_read_unlock();
1117 return ret;
1118}
1119
1120/*
1121 * Add a pending IRQ into lapic.
1122 * Return 1 if successfully added and 0 if discarded.
1123 */
1124static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1125 int vector, int level, int trig_mode,
1126 struct dest_map *dest_map)
1127{
1128 int result = 0;
1129 struct kvm_vcpu *vcpu = apic->vcpu;
1130
1131 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1132 trig_mode, vector);
1133 switch (delivery_mode) {
1134 case APIC_DM_LOWEST:
1135 vcpu->arch.apic_arb_prio++;
1136 fallthrough;
1137 case APIC_DM_FIXED:
1138 if (unlikely(trig_mode && !level))
1139 break;
1140
1141 /* FIXME add logic for vcpu on reset */
1142 if (unlikely(!apic_enabled(apic)))
1143 break;
1144
1145 result = 1;
1146
1147 if (dest_map) {
1148 __set_bit(vcpu->vcpu_id, dest_map->map);
1149 dest_map->vectors[vcpu->vcpu_id] = vector;
1150 }
1151
1152 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1153 if (trig_mode)
1154 kvm_lapic_set_vector(vector,
1155 apic->regs + APIC_TMR);
1156 else
1157 kvm_lapic_clear_vector(vector,
1158 apic->regs + APIC_TMR);
1159 }
1160
1161 static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
1162 trig_mode, vector);
1163 break;
1164
1165 case APIC_DM_REMRD:
1166 result = 1;
1167 vcpu->arch.pv.pv_unhalted = 1;
1168 kvm_make_request(KVM_REQ_EVENT, vcpu);
1169 kvm_vcpu_kick(vcpu);
1170 break;
1171
1172 case APIC_DM_SMI:
1173 if (!kvm_inject_smi(vcpu)) {
1174 kvm_vcpu_kick(vcpu);
1175 result = 1;
1176 }
1177 break;
1178
1179 case APIC_DM_NMI:
1180 result = 1;
1181 kvm_inject_nmi(vcpu);
1182 kvm_vcpu_kick(vcpu);
1183 break;
1184
1185 case APIC_DM_INIT:
1186 if (!trig_mode || level) {
1187 result = 1;
1188 /* assumes that there are only KVM_APIC_INIT/SIPI */
1189 apic->pending_events = (1UL << KVM_APIC_INIT);
1190 kvm_make_request(KVM_REQ_EVENT, vcpu);
1191 kvm_vcpu_kick(vcpu);
1192 }
1193 break;
1194
1195 case APIC_DM_STARTUP:
1196 result = 1;
1197 apic->sipi_vector = vector;
/* make sure sipi_vector is visible to the receiver */
1199 smp_wmb();
1200 set_bit(KVM_APIC_SIPI, &apic->pending_events);
1201 kvm_make_request(KVM_REQ_EVENT, vcpu);
1202 kvm_vcpu_kick(vcpu);
1203 break;
1204
1205 case APIC_DM_EXTINT:
1206 /*
1207 * Should only be called by kvm_apic_local_deliver() with LVT0,
1208 * before NMI watchdog was enabled. Already handled by
1209 * kvm_apic_accept_pic_intr().
1210 */
1211 break;
1212
1213 default:
1214 printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1215 delivery_mode);
1216 break;
1217 }
1218 return result;
1219}
1220
1221/*
1222 * This routine identifies the destination vcpus mask meant to receive the
1223 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
1224 * out the destination vcpus array and set the bitmap or it traverses to
1225 * each available vcpu to identify the same.
1226 */
1227void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
1228 unsigned long *vcpu_bitmap)
1229{
1230 struct kvm_lapic **dest_vcpu = NULL;
1231 struct kvm_lapic *src = NULL;
1232 struct kvm_apic_map *map;
1233 struct kvm_vcpu *vcpu;
1234 unsigned long bitmap, i;
1235 int vcpu_idx;
1236 bool ret;
1237
1238 rcu_read_lock();
1239 map = rcu_dereference(kvm->arch.apic_map);
1240
1241 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1242 &bitmap);
1243 if (ret) {
1244 for_each_set_bit(i, &bitmap, 16) {
1245 if (!dest_vcpu[i])
1246 continue;
1247 vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1248 __set_bit(vcpu_idx, vcpu_bitmap);
1249 }
1250 } else {
1251 kvm_for_each_vcpu(i, vcpu, kvm) {
1252 if (!kvm_apic_present(vcpu))
1253 continue;
1254 if (!kvm_apic_match_dest(vcpu, NULL,
1255 irq->shorthand,
1256 irq->dest_id,
1257 irq->dest_mode))
1258 continue;
1259 __set_bit(i, vcpu_bitmap);
1260 }
1261 }
1262 rcu_read_unlock();
1263}
1264
1265int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1266{
1267 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1268}
1269
1270static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1271{
1272 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1273}
1274
1275static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1276{
1277 int trigger_mode;
1278
1279 /* Eoi the ioapic only if the ioapic doesn't own the vector. */
1280 if (!kvm_ioapic_handles_vector(apic, vector))
1281 return;
1282
1283 /* Request a KVM exit to inform the userspace IOAPIC. */
1284 if (irqchip_split(apic->vcpu->kvm)) {
1285 apic->vcpu->arch.pending_ioapic_eoi = vector;
1286 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1287 return;
1288 }
1289
1290 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1291 trigger_mode = IOAPIC_LEVEL_TRIG;
1292 else
1293 trigger_mode = IOAPIC_EDGE_TRIG;
1294
1295 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1296}
1297
1298static int apic_set_eoi(struct kvm_lapic *apic)
1299{
1300 int vector = apic_find_highest_isr(apic);
1301
1302 trace_kvm_eoi(apic, vector);
1303
/*
 * Not every EOI write has a corresponding ISR bit set; one example is
 * when the kernel checks the timer during setup_IO_APIC.
 */
1308 if (vector == -1)
1309 return vector;
1310
1311 apic_clear_isr(vector, apic);
1312 apic_update_ppr(apic);
1313
1314 if (to_hv_vcpu(apic->vcpu) &&
1315 test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
1316 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1317
1318 kvm_ioapic_send_eoi(apic, vector);
1319 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1320 return vector;
1321}
1322
1323/*
1324 * this interface assumes a trap-like exit, which has already finished
1325 * desired side effect including vISR and vPPR update.
1326 */
1327void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1328{
1329 struct kvm_lapic *apic = vcpu->arch.apic;
1330
1331 trace_kvm_eoi(apic, vector);
1332
1333 kvm_ioapic_send_eoi(apic, vector);
1334 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1335}
1336EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1337
1338void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1339{
1340 struct kvm_lapic_irq irq;
1341
1342 /* KVM has no delay and should always clear the BUSY/PENDING flag. */
1343 WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
1344
1345 irq.vector = icr_low & APIC_VECTOR_MASK;
1346 irq.delivery_mode = icr_low & APIC_MODE_MASK;
1347 irq.dest_mode = icr_low & APIC_DEST_MASK;
1348 irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1349 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1350 irq.shorthand = icr_low & APIC_SHORT_MASK;
1351 irq.msi_redir_hint = false;
1352 if (apic_x2apic_mode(apic))
1353 irq.dest_id = icr_high;
1354 else
1355 irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
1356
1357 trace_kvm_apic_ipi(icr_low, irq.dest_id);
1358
1359 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1360}
1361EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
1362
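/*
 * TMCCT is not kept up to date in the register; it is computed on demand
 * from the time remaining until target_expiration (taken modulo the
 * period, which makes the periodic case wrap like hardware's
 * down-counter) divided by the duration of one timer tick,
 * APIC_BUS_CYCLE_NS * divide_count.
 */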
1363static u32 apic_get_tmcct(struct kvm_lapic *apic)
1364{
1365 ktime_t remaining, now;
1366 s64 ns;
1367 u32 tmcct;
1368
1369 ASSERT(apic != NULL);
1370
1371 /* if initial count is 0, current count should also be 0 */
1372 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1373 apic->lapic_timer.period == 0)
1374 return 0;
1375
1376 now = ktime_get();
1377 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1378 if (ktime_to_ns(remaining) < 0)
1379 remaining = 0;
1380
1381 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1382 tmcct = div64_u64(ns,
1383 (APIC_BUS_CYCLE_NS * apic->divide_count));
1384
1385 return tmcct;
1386}
1387
1388static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1389{
1390 struct kvm_vcpu *vcpu = apic->vcpu;
1391 struct kvm_run *run = vcpu->run;
1392
1393 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1394 run->tpr_access.rip = kvm_rip_read(vcpu);
1395 run->tpr_access.is_write = write;
1396}
1397
1398static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1399{
1400 if (apic->vcpu->arch.tpr_access_reporting)
1401 __report_tpr_access(apic, write);
1402}
1403
1404static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1405{
1406 u32 val = 0;
1407
1408 if (offset >= LAPIC_MMIO_LENGTH)
1409 return 0;
1410
1411 switch (offset) {
1412 case APIC_ARBPRI:
1413 break;
1414
1415 case APIC_TMCCT: /* Timer CCR */
1416 if (apic_lvtt_tscdeadline(apic))
1417 return 0;
1418
1419 val = apic_get_tmcct(apic);
1420 break;
1421 case APIC_PROCPRI:
1422 apic_update_ppr(apic);
1423 val = kvm_lapic_get_reg(apic, offset);
1424 break;
1425 case APIC_TASKPRI:
1426 report_tpr_access(apic, false);
1427 fallthrough;
1428 default:
1429 val = kvm_lapic_get_reg(apic, offset);
1430 break;
1431 }
1432
1433 return val;
1434}
1435
1436static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1437{
1438 return container_of(dev, struct kvm_lapic, dev);
1439}
1440
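/*
 * Every APIC register sits at a 16-byte aligned offset, so (reg >> 4)
 * maps each register to a unique bit and a u64 covers the whole
 * 0x0 - 0x3f0 register range.
 */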
1441#define APIC_REG_MASK(reg) (1ull << ((reg) >> 4))
1442#define APIC_REGS_MASK(first, count) \
1443 (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
1444
1445static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1446 void *data)
1447{
1448 unsigned char alignment = offset & 0xf;
1449 u32 result;
1450 /* this bitmask has a bit cleared for each reserved register */
1451 u64 valid_reg_mask =
1452 APIC_REG_MASK(APIC_ID) |
1453 APIC_REG_MASK(APIC_LVR) |
1454 APIC_REG_MASK(APIC_TASKPRI) |
1455 APIC_REG_MASK(APIC_PROCPRI) |
1456 APIC_REG_MASK(APIC_LDR) |
1457 APIC_REG_MASK(APIC_DFR) |
1458 APIC_REG_MASK(APIC_SPIV) |
1459 APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1460 APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1461 APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1462 APIC_REG_MASK(APIC_ESR) |
1463 APIC_REG_MASK(APIC_ICR) |
1464 APIC_REG_MASK(APIC_LVTT) |
1465 APIC_REG_MASK(APIC_LVTTHMR) |
1466 APIC_REG_MASK(APIC_LVTPC) |
1467 APIC_REG_MASK(APIC_LVT0) |
1468 APIC_REG_MASK(APIC_LVT1) |
1469 APIC_REG_MASK(APIC_LVTERR) |
1470 APIC_REG_MASK(APIC_TMICT) |
1471 APIC_REG_MASK(APIC_TMCCT) |
1472 APIC_REG_MASK(APIC_TDCR);
1473
1474 if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1475 valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);
1476
1477 /*
1478 * ARBPRI and ICR2 are not valid in x2APIC mode. WARN if KVM reads ICR
1479 * in x2APIC mode as it's an 8-byte register in x2APIC and needs to be
1480 * manually handled by the caller.
1481 */
1482 if (!apic_x2apic_mode(apic))
1483 valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
1484 APIC_REG_MASK(APIC_ICR2);
1485 else
1486 WARN_ON_ONCE(offset == APIC_ICR);
1487
1488 if (alignment + len > 4)
1489 return 1;
1490
1491 if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
1492 return 1;
1493
1494 result = __apic_read(apic, offset & ~0xf);
1495
1496 trace_kvm_apic_read(offset, result);
1497
1498 switch (len) {
1499 case 1:
1500 case 2:
1501 case 4:
1502 memcpy(data, (char *)&result + alignment, len);
1503 break;
1504 default:
printk(KERN_ERR "Local APIC read with len = %x, "
"should be 1, 2, or 4 instead\n", len);
1507 break;
1508 }
1509 return 0;
1510}
1511
1512static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1513{
1514 return addr >= apic->base_address &&
1515 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1516}
1517
1518static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1519 gpa_t address, int len, void *data)
1520{
1521 struct kvm_lapic *apic = to_lapic(this);
1522 u32 offset = address - apic->base_address;
1523
1524 if (!apic_mmio_in_range(apic, address))
1525 return -EOPNOTSUPP;
1526
1527 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1528 if (!kvm_check_has_quirk(vcpu->kvm,
1529 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1530 return -EOPNOTSUPP;
1531
1532 memset(data, 0xff, len);
1533 return 0;
1534 }
1535
1536 kvm_lapic_reg_read(apic, offset, len, data);
1537
1538 return 0;
1539}
1540
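/*
 * The TDCR encodes the divide value in bits 0, 1 and 3 (bit 2 is
 * reserved).  Squashing the three bits together and adding one gives the
 * power of two, with the all-ones pattern wrapping to divide-by-1.
 * E.g. TDCR = 0x3 gives divide_count = 16, TDCR = 0xb gives 1.
 */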
1541static void update_divide_count(struct kvm_lapic *apic)
1542{
1543 u32 tmp1, tmp2, tdcr;
1544
1545 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1546 tmp1 = tdcr & 0xf;
1547 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1548 apic->divide_count = 0x1 << (tmp2 & 0x7);
1549}
1550
1551static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1552{
1553 /*
1554 * Do not allow the guest to program periodic timers with small
1555 * interval, since the hrtimers are not throttled by the host
1556 * scheduler.
1557 */
1558 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1559 s64 min_period = min_timer_period_us * 1000LL;
1560
1561 if (apic->lapic_timer.period < min_period) {
1562 pr_info_ratelimited(
1563 "kvm: vcpu %i: requested %lld ns "
1564 "lapic timer period limited to %lld ns\n",
1565 apic->vcpu->vcpu_id,
1566 apic->lapic_timer.period, min_period);
1567 apic->lapic_timer.period = min_period;
1568 }
1569 }
1570}
1571
1572static void cancel_hv_timer(struct kvm_lapic *apic);
1573
1574static void cancel_apic_timer(struct kvm_lapic *apic)
1575{
1576 hrtimer_cancel(&apic->lapic_timer.timer);
1577 preempt_disable();
1578 if (apic->lapic_timer.hv_timer_in_use)
1579 cancel_hv_timer(apic);
1580 preempt_enable();
1581 atomic_set(&apic->lapic_timer.pending, 0);
1582}
1583
1584static void apic_update_lvtt(struct kvm_lapic *apic)
1585{
1586 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1587 apic->lapic_timer.timer_mode_mask;
1588
1589 if (apic->lapic_timer.timer_mode != timer_mode) {
1590 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1591 APIC_LVT_TIMER_TSCDEADLINE)) {
1592 cancel_apic_timer(apic);
1593 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1594 apic->lapic_timer.period = 0;
1595 apic->lapic_timer.tscdeadline = 0;
1596 }
1597 apic->lapic_timer.timer_mode = timer_mode;
1598 limit_periodic_timer_frequency(apic);
1599 }
1600}
1601
1602/*
1603 * On APICv, this test will cause a busy wait
1604 * during a higher-priority task.
1605 */
1606
1607static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1608{
1609 struct kvm_lapic *apic = vcpu->arch.apic;
1610 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1611
1612 if (kvm_apic_hw_enabled(apic)) {
1613 int vec = reg & APIC_VECTOR_MASK;
1614 void *bitmap = apic->regs + APIC_ISR;
1615
1616 if (apic->apicv_active)
1617 bitmap = apic->regs + APIC_IRR;
1618
1619 if (apic_test_vector(vec, bitmap))
1620 return true;
1621 }
1622 return false;
1623}
1624
1625static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1626{
1627 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1628
1629 /*
1630 * If the guest TSC is running at a different ratio than the host, then
1631 * convert the delay to nanoseconds to achieve an accurate delay. Note
1632 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1633 * always for VMX enabled hardware.
1634 */
1635 if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1636 __delay(min(guest_cycles,
1637 nsec_to_cycles(vcpu, timer_advance_ns)));
1638 } else {
1639 u64 delay_ns = guest_cycles * 1000000ULL;
1640 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1641 ndelay(min_t(u32, delay_ns, timer_advance_ns));
1642 }
1643}
1644
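/*
 * Adaptively tune the timer advancement: convert the observed early/late
 * delta from guest TSC cycles to nanoseconds (cycles * 1000000 /
 * virtual_tsc_khz) and move timer_advance_ns by only a
 * 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP fraction of it, so a one-off outlier
 * can't wildly swing the advancement.
 */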
1645static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1646 s64 advance_expire_delta)
1647{
1648 struct kvm_lapic *apic = vcpu->arch.apic;
1649 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1650 u64 ns;
1651
1652 /* Do not adjust for tiny fluctuations or large random spikes. */
1653 if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1654 abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1655 return;
1656
1657 /* too early */
1658 if (advance_expire_delta < 0) {
1659 ns = -advance_expire_delta * 1000000ULL;
1660 do_div(ns, vcpu->arch.virtual_tsc_khz);
1661 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1662 } else {
1663 /* too late */
1664 ns = advance_expire_delta * 1000000ULL;
1665 do_div(ns, vcpu->arch.virtual_tsc_khz);
1666 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1667 }
1668
1669 if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1670 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1671 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1672}
1673
1674static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1675{
1676 struct kvm_lapic *apic = vcpu->arch.apic;
1677 u64 guest_tsc, tsc_deadline;
1678
1679 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1680 apic->lapic_timer.expired_tscdeadline = 0;
1681 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1682 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1683
if (lapic_timer_advance) {
1685 adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1686 /*
1687 * If the timer fired early, reread the TSC to account for the
1688 * overhead of the above adjustment to avoid waiting longer
1689 * than is necessary.
1690 */
1691 if (guest_tsc < tsc_deadline)
1692 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1693 }
1694
1695 if (guest_tsc < tsc_deadline)
1696 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1697}
1698
1699void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1700{
1701 if (lapic_in_kernel(vcpu) &&
1702 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1703 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1704 lapic_timer_int_injected(vcpu))
1705 __kvm_wait_lapic_expire(vcpu);
1706}
1707EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1708
1709static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1710{
1711 struct kvm_timer *ktimer = &apic->lapic_timer;
1712
1713 kvm_apic_local_deliver(apic, APIC_LVTT);
1714 if (apic_lvtt_tscdeadline(apic)) {
1715 ktimer->tscdeadline = 0;
1716 } else if (apic_lvtt_oneshot(apic)) {
1717 ktimer->tscdeadline = 0;
1718 ktimer->target_expiration = 0;
1719 }
1720}
1721
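/*
 * Handle expiry of the emulated timer: inject the timer interrupt
 * immediately when it's safe to do so from this context (vCPU context
 * with APICv active, or posted timer interrupts), otherwise mark it
 * pending and kick the vCPU so the interrupt is injected on the next
 * entry.
 */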
1722static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1723{
1724 struct kvm_vcpu *vcpu = apic->vcpu;
1725 struct kvm_timer *ktimer = &apic->lapic_timer;
1726
1727 if (atomic_read(&apic->lapic_timer.pending))
1728 return;
1729
1730 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1731 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1732
1733 if (!from_timer_fn && apic->apicv_active) {
1734 WARN_ON(kvm_get_running_vcpu() != vcpu);
1735 kvm_apic_inject_pending_timer_irqs(apic);
1736 return;
1737 }
1738
1739 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1740 /*
1741 * Ensure the guest's timer has truly expired before posting an
1742 * interrupt. Open code the relevant checks to avoid querying
1743 * lapic_timer_int_injected(), which will be false since the
1744 * interrupt isn't yet injected. Waiting until after injecting
1745 * is not an option since that won't help a posted interrupt.
1746 */
1747 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1748 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1749 __kvm_wait_lapic_expire(vcpu);
1750 kvm_apic_inject_pending_timer_irqs(apic);
1751 return;
1752 }
1753
1754 atomic_inc(&apic->lapic_timer.pending);
1755 kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1756 if (from_timer_fn)
1757 kvm_vcpu_kick(vcpu);
1758}
1759
1760static void start_sw_tscdeadline(struct kvm_lapic *apic)
1761{
1762 struct kvm_timer *ktimer = &apic->lapic_timer;
1763 u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1764 u64 ns = 0;
1765 ktime_t expire;
1766 struct kvm_vcpu *vcpu = apic->vcpu;
1767 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1768 unsigned long flags;
1769 ktime_t now;
1770
1771 if (unlikely(!tscdeadline || !this_tsc_khz))
1772 return;
1773
1774 local_irq_save(flags);
1775
1776 now = ktime_get();
1777 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1778
1779 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1780 do_div(ns, this_tsc_khz);
1781
1782 if (likely(tscdeadline > guest_tsc) &&
1783 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1784 expire = ktime_add_ns(now, ns);
1785 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1786 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
} else {
apic_timer_expired(apic, false);
}
1789
1790 local_irq_restore(flags);
1791}
1792
1793static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1794{
1795 return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1796}
1797
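/*
 * Rescale the time remaining until expiration when the guest changes the
 * divide configuration mid-countdown: the remaining ticks stay the same,
 * so the remaining wall-clock time scales by new_divisor / old_divisor.
 */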
1798static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1799{
1800 ktime_t now, remaining;
1801 u64 ns_remaining_old, ns_remaining_new;
1802
1803 apic->lapic_timer.period =
1804 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1805 limit_periodic_timer_frequency(apic);
1806
1807 now = ktime_get();
1808 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1809 if (ktime_to_ns(remaining) < 0)
1810 remaining = 0;
1811
1812 ns_remaining_old = ktime_to_ns(remaining);
1813 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1814 apic->divide_count, old_divisor);
1815
1816 apic->lapic_timer.tscdeadline +=
1817 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1818 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1819 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1820}
1821
1822static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1823{
1824 ktime_t now;
1825 u64 tscl = rdtsc();
1826 s64 deadline;
1827
1828 now = ktime_get();
1829 apic->lapic_timer.period =
1830 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1831
1832 if (!apic->lapic_timer.period) {
1833 apic->lapic_timer.tscdeadline = 0;
1834 return false;
1835 }
1836
1837 limit_periodic_timer_frequency(apic);
1838 deadline = apic->lapic_timer.period;
1839
1840 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1841 if (unlikely(count_reg != APIC_TMICT)) {
1842 deadline = tmict_to_ns(apic,
1843 kvm_lapic_get_reg(apic, count_reg));
1844 if (unlikely(deadline <= 0))
1845 deadline = apic->lapic_timer.period;
1846 else if (unlikely(deadline > apic->lapic_timer.period)) {
1847 pr_info_ratelimited(
1848 "kvm: vcpu %i: requested lapic timer restore with "
1849 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1850 "Using initial count to start timer.\n",
1851 apic->vcpu->vcpu_id,
1852 count_reg,
1853 kvm_lapic_get_reg(apic, count_reg),
1854 deadline, apic->lapic_timer.period);
1855 kvm_lapic_set_reg(apic, count_reg, 0);
1856 deadline = apic->lapic_timer.period;
1857 }
1858 }
1859 }
1860
1861 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1862 nsec_to_cycles(apic->vcpu, deadline);
1863 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1864
1865 return true;
1866}
1867
1868static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1869{
1870 ktime_t now = ktime_get();
1871 u64 tscl = rdtsc();
1872 ktime_t delta;
1873
1874 /*
1875 * Synchronize both deadlines to the same time source or
1876 * differences in the periods (caused by differences in the
1877 * underlying clocks or numerical approximation errors) will
1878 * cause the two to drift apart over time as the errors
1879 * accumulate.
1880 */
1881 apic->lapic_timer.target_expiration =
1882 ktime_add_ns(apic->lapic_timer.target_expiration,
1883 apic->lapic_timer.period);
1884 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1885 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1886 nsec_to_cycles(apic->vcpu, delta);
1887}
1888
1889static void start_sw_period(struct kvm_lapic *apic)
1890{
1891 if (!apic->lapic_timer.period)
1892 return;
1893
1894 if (ktime_after(ktime_get(),
1895 apic->lapic_timer.target_expiration)) {
1896 apic_timer_expired(apic, false);
1897
1898 if (apic_lvtt_oneshot(apic))
1899 return;
1900
1901 advance_periodic_target_expiration(apic);
1902 }
1903
1904 hrtimer_start(&apic->lapic_timer.timer,
1905 apic->lapic_timer.target_expiration,
1906 HRTIMER_MODE_ABS_HARD);
1907}
1908
1909bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1910{
1911 if (!lapic_in_kernel(vcpu))
1912 return false;
1913
1914 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1915}
1916
1917static void cancel_hv_timer(struct kvm_lapic *apic)
1918{
1919 WARN_ON(preemptible());
1920 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1921 static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
1922 apic->lapic_timer.hv_timer_in_use = false;
1923}
1924
1925static bool start_hv_timer(struct kvm_lapic *apic)
1926{
1927 struct kvm_timer *ktimer = &apic->lapic_timer;
1928 struct kvm_vcpu *vcpu = apic->vcpu;
1929 bool expired;
1930
1931 WARN_ON(preemptible());
1932 if (!kvm_can_use_hv_timer(vcpu))
1933 return false;
1934
1935 if (!ktimer->tscdeadline)
1936 return false;
1937
1938 if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
1939 return false;
1940
1941 ktimer->hv_timer_in_use = true;
1942 hrtimer_cancel(&ktimer->timer);
1943
1944 /*
1945 * To simplify handling the periodic timer, leave the hv timer running
1946 * even if the deadline timer has expired, i.e. rely on the resulting
1947 * VM-Exit to recompute the periodic timer's target expiration.
1948 */
1949 if (!apic_lvtt_period(apic)) {
1950 /*
1951 * Cancel the hv timer if the sw timer fired while the hv timer
1952 * was being programmed, or if the hv timer itself expired.
1953 */
1954 if (atomic_read(&ktimer->pending)) {
1955 cancel_hv_timer(apic);
1956 } else if (expired) {
1957 apic_timer_expired(apic, false);
1958 cancel_hv_timer(apic);
1959 }
1960 }
1961
1962 trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1963
1964 return true;
1965}
1966
1967static void start_sw_timer(struct kvm_lapic *apic)
1968{
1969 struct kvm_timer *ktimer = &apic->lapic_timer;
1970
1971 WARN_ON(preemptible());
1972 if (apic->lapic_timer.hv_timer_in_use)
1973 cancel_hv_timer(apic);
1974 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1975 return;
1976
1977 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1978 start_sw_period(apic);
1979 else if (apic_lvtt_tscdeadline(apic))
1980 start_sw_tscdeadline(apic);
1981 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1982}
1983
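/*
 * (Re)arm the emulated timer, preferring a hardware-accelerated
 * hypervisor timer (e.g. the VMX preemption timer) when one is usable
 * and falling back to the hrtimer-based software timer otherwise.
 */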
1984static void restart_apic_timer(struct kvm_lapic *apic)
1985{
1986 preempt_disable();
1987
1988 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1989 goto out;
1990
1991 if (!start_hv_timer(apic))
1992 start_sw_timer(apic);
1993out:
1994 preempt_enable();
1995}
1996
1997void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1998{
1999 struct kvm_lapic *apic = vcpu->arch.apic;
2000
2001 preempt_disable();
2002 /* If the preempt notifier has already run, it also called apic_timer_expired */
2003 if (!apic->lapic_timer.hv_timer_in_use)
2004 goto out;
2005 WARN_ON(kvm_vcpu_is_blocking(vcpu));
2006 apic_timer_expired(apic, false);
2007 cancel_hv_timer(apic);
2008
2009 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2010 advance_periodic_target_expiration(apic);
2011 restart_apic_timer(apic);
2012 }
2013out:
2014 preempt_enable();
2015}
2016EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2017
2018void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2019{
2020 restart_apic_timer(vcpu->arch.apic);
2021}
2022
2023void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2024{
2025 struct kvm_lapic *apic = vcpu->arch.apic;
2026
2027 preempt_disable();
2028 /* Possibly the TSC deadline timer is not enabled yet */
2029 if (apic->lapic_timer.hv_timer_in_use)
2030 start_sw_timer(apic);
2031 preempt_enable();
2032}
2033
2034void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2035{
2036 struct kvm_lapic *apic = vcpu->arch.apic;
2037
2038 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2039 restart_apic_timer(apic);
2040}
2041
2042static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2043{
2044 atomic_set(&apic->lapic_timer.pending, 0);
2045
2046 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2047 && !set_target_expiration(apic, count_reg))
2048 return;
2049
2050 restart_apic_timer(apic);
2051}
2052
2053static void start_apic_timer(struct kvm_lapic *apic)
2054{
2055 __start_apic_timer(apic, APIC_TMICT);
2056}
2057
2058static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2059{
2060 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2061
2062 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2063 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
if (lvt0_in_nmi_mode)
atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
else
atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2068 }
2069}
2070
2071static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
2072{
2073 struct kvm *kvm = apic->vcpu->kvm;
2074
2075 if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
2076 return;
2077
2078 if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
2079 return;
2080
2081 kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
2082}
2083
2084static int get_lvt_index(u32 reg)
2085{
2086 if (reg == APIC_LVTCMCI)
2087 return LVT_CMCI;
2088 if (reg < APIC_LVTT || reg > APIC_LVTERR)
2089 return -1;
2090 return array_index_nospec(
2091 (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2092}
2093
2094static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2095{
2096 int ret = 0;
2097
2098 trace_kvm_apic_write(reg, val);
2099
2100 switch (reg) {
2101 case APIC_ID: /* Local APIC ID */
2102 if (!apic_x2apic_mode(apic)) {
2103 kvm_apic_set_xapic_id(apic, val >> 24);
2104 kvm_lapic_xapic_id_updated(apic);
2105 } else {
2106 ret = 1;
2107 }
2108 break;
2109
2110 case APIC_TASKPRI:
2111 report_tpr_access(apic, true);
2112 apic_set_tpr(apic, val & 0xff);
2113 break;
2114
2115 case APIC_EOI:
2116 apic_set_eoi(apic);
2117 break;
2118
2119 case APIC_LDR:
2120 if (!apic_x2apic_mode(apic))
2121 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2122 else
2123 ret = 1;
2124 break;
2125
2126 case APIC_DFR:
2127 if (!apic_x2apic_mode(apic))
2128 kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2129 else
2130 ret = 1;
2131 break;
2132
2133 case APIC_SPIV: {
u32 mask = 0x3ff;

2135 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2136 mask |= APIC_SPIV_DIRECTED_EOI;
2137 apic_set_spiv(apic, val & mask);
2138 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2139 int i;
2140
2141 for (i = 0; i < apic->nr_lvt_entries; i++) {
2142 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2143 kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2144 }
2145 apic_update_lvtt(apic);
2146 atomic_set(&apic->lapic_timer.pending, 0);
2148 }
2149 break;
2150 }
2151 case APIC_ICR:
2152 WARN_ON_ONCE(apic_x2apic_mode(apic));
2153
2154 /* No delay here, so we always clear the pending bit */
2155 val &= ~APIC_ICR_BUSY;
2156 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2157 kvm_lapic_set_reg(apic, APIC_ICR, val);
2158 break;
2159 case APIC_ICR2:
2160 if (apic_x2apic_mode(apic))
2161 ret = 1;
2162 else
2163 kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2164 break;
2165
2166 case APIC_LVT0:
2167 apic_manage_nmi_watchdog(apic, val);
2168 fallthrough;
2169 case APIC_LVTTHMR:
2170 case APIC_LVTPC:
2171 case APIC_LVT1:
2172 case APIC_LVTERR:
2173 case APIC_LVTCMCI: {
2174 u32 index = get_lvt_index(reg);
2175 if (!kvm_lapic_lvt_supported(apic, index)) {
2176 ret = 1;
2177 break;
2178 }
2179 if (!kvm_apic_sw_enabled(apic))
2180 val |= APIC_LVT_MASKED;
2181 val &= apic_lvt_mask[index];
2182 kvm_lapic_set_reg(apic, reg, val);
2183 break;
2184 }
2185
2186 case APIC_LVTT:
2187 if (!kvm_apic_sw_enabled(apic))
2188 val |= APIC_LVT_MASKED;
2189 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2190 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2191 apic_update_lvtt(apic);
2192 break;
2193
2194 case APIC_TMICT:
2195 if (apic_lvtt_tscdeadline(apic))
2196 break;
2197
2198 cancel_apic_timer(apic);
2199 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2200 start_apic_timer(apic);
2201 break;
2202
2203 case APIC_TDCR: {
2204 uint32_t old_divisor = apic->divide_count;
2205
2206 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2207 update_divide_count(apic);
2208 if (apic->divide_count != old_divisor &&
2209 apic->lapic_timer.period) {
2210 hrtimer_cancel(&apic->lapic_timer.timer);
2211 update_target_expiration(apic, old_divisor);
2212 restart_apic_timer(apic);
2213 }
2214 break;
2215 }
2216 case APIC_ESR:
2217 if (apic_x2apic_mode(apic) && val != 0)
2218 ret = 1;
2219 break;
2220
2221 case APIC_SELF_IPI:
2222 if (apic_x2apic_mode(apic))
2223 kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
2224 else
2225 ret = 1;
2226 break;
2227 default:
2228 ret = 1;
2229 break;
2230 }
2231
2232 /*
2233 * Recalculate APIC maps if necessary, e.g. if the software enable bit
2234 * was toggled, the APIC ID changed, etc... The maps are marked dirty
2235 * on relevant changes, i.e. this is a nop for most writes.
2236 */
2237 kvm_recalculate_apic_map(apic->vcpu->kvm);
2238
2239 return ret;
2240}
2241
2242static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2243 gpa_t address, int len, const void *data)
2244{
2245 struct kvm_lapic *apic = to_lapic(this);
2246 unsigned int offset = address - apic->base_address;
2247 u32 val;
2248
2249 if (!apic_mmio_in_range(apic, address))
2250 return -EOPNOTSUPP;
2251
2252 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2253 if (!kvm_check_has_quirk(vcpu->kvm,
2254 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2255 return -EOPNOTSUPP;
2256
2257 return 0;
2258 }
2259
/*
 * APIC registers must be aligned on a 128-bit boundary.
 * 32/64/128-bit registers must be accessed through 32-bit reads and
 * writes.  Refer to SDM 8.4.1.
 */
2265 if (len != 4 || (offset & 0xf))
2266 return 0;
2267
2268 val = *(u32*)data;
2269
2270 kvm_lapic_reg_write(apic, offset & 0xff0, val);
2271
2272 return 0;
2273}
2274
2275void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2276{
2277 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2278}
2279EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2280
2281/* emulate APIC access in a trap manner */
2282void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2283{
2284 struct kvm_lapic *apic = vcpu->arch.apic;
2285 u64 val;
2286
2287 if (apic_x2apic_mode(apic)) {
2288 if (KVM_BUG_ON(kvm_lapic_msr_read(apic, offset, &val), vcpu->kvm))
2289 return;
2290 } else {
2291 val = kvm_lapic_get_reg(apic, offset);
2292 }
2293
2294 /*
2295 * ICR is a single 64-bit register when x2APIC is enabled. For legacy
2296 * xAPIC, ICR writes need to go down the common (slightly slower) path
2297 * to get the upper half from ICR2.
2298 */
2299 if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
2300 kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
2301 trace_kvm_apic_write(APIC_ICR, val);
2302 } else {
2303 /* TODO: optimize to just emulate side effect w/o one more write */
2304 kvm_lapic_reg_write(apic, offset, (u32)val);
2305 }
2306}
2307EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2308
2309void kvm_free_lapic(struct kvm_vcpu *vcpu)
2310{
2311 struct kvm_lapic *apic = vcpu->arch.apic;
2312
2313 if (!vcpu->arch.apic)
2314 return;
2315
2316 hrtimer_cancel(&apic->lapic_timer.timer);
2317
	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_branch_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_branch_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
		kvm_vcpu_update_apicv(vcpu);
		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

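	/*
	 * APIC virtualization assumes the APIC resides at its default base
	 * address; inhibit APICv if the guest relocates it.
	 */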
	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	    apic->base_address != APIC_DEFAULT_PHYS_BASE) {
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
	}
}

void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic->apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		/*
		 * Don't clear irr_pending, searching the IRR can race with
		 * updates from the CPU as APICv is still active from hardware's
		 * perspective. The flag will be cleared as appropriate when
		 * KVM injects the interrupt.
		 */
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 msr_val;
	int i;

	if (!init_event) {
		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			msr_val |= MSR_IA32_APICBASE_BSP;
		kvm_lapic_set_base(vcpu, msr_val);
	}

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
	if (!init_event)
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < apic->nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
				  SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	if (!apic_x2apic_mode(apic)) {
		kvm_lapic_set_reg(apic, APIC_ICR, 0);
		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	} else {
		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
	}
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
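	/* 256 vectors, stored as 8 x 32-bit registers each for IRR/ISR/TMR. */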
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);

	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					 NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read = apic_mmio_read,
	.write = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

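	/*
	 * For periodic mode, push the expiration out by one period and let
	 * the hrtimer re-arm itself; one-shot and TSC-deadline timers fire
	 * only once.
	 */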
	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "Failed to allocate APIC register page for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (lapic_timer_advance)
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;

	/*
	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		return 1;
	return 0;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode. Then we cannot inject the interrupt via RVI,
	 * because that would deliver it through the IDT instead of
	 * acknowledging it as part of the VM-exit, as L1 expects.
	 */

	apic_clear_irr(vector, apic);
	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
		u64 icr;

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
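			/*
			 * Legacy userspace uses the xAPIC-style ID in bits
			 * 31:24 of APIC_ID, so convert to/from the full
			 * 32-bit x2APIC ID.
			 */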
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/*
		 * In x2APIC mode, the LDR is fixed and derived from the ID,
		 * and the ICR is internally a single 64-bit register that is
		 * split into ICR+ICR2 in userspace for backwards
		 * compatibility.
		 */
		if (set) {
			*ldr = kvm_apic_calc_x2apic_ldr(*id);

			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
		} else {
			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
		}
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Compute the current timer count from any remaining timer period
	 * and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	if (!apic_x2apic_mode(apic))
		kvm_lapic_xapic_id_updated(apic);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	cancel_apic_timer(apic);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
	    kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or interrupt cancellation
 *
 * Detect whether the guest triggered PV EOI since the last entry and, if so,
 * perform the EOI on the guest's behalf. Clear the PV EOI flag in guest
 * memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));

	if (pv_eoi_test_and_clr_pending(vcpu))
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and, if so, do it.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
				      struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
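	/*
	 * Pack the vAPIC snapshot: TPR in byte 0, the priority class of the
	 * highest in-service vector in byte 1, and the highest pending IRR
	 * vector in byte 3.
	 */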
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.apic->vapic_cache,
					      vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{
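	/*
	 * The busy (delivery status) flag is not defined for the x2APIC ICR;
	 * clear it so reads of the ICR never see a stale busy bit.
	 */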
	data &= ~APIC_ICR_BUSY;

	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
	trace_kvm_apic_write(APIC_ICR, data);
	return 0;
}

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
	u32 low;

	if (reg == APIC_ICR) {
		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
		return 0;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;

	*data = low;

	return 0;
}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{
	/*
	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such; all other registers remain accessible only
	 * through 32-bit reads/writes.
	 */
	if (reg == APIC_ICR)
		return kvm_x2apic_icr_write(apic, data);

	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
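	/*
	 * x2APIC MSRs 0x800-0x8ff map linearly onto the xAPIC register
	 * layout: each MSR corresponds to one 16-byte register slot, e.g.
	 * MSR 0x830 maps to APIC_ICR at offset 0x300.
	 */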
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_write(apic, reg, data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

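	/* DFR is not architecturally defined in x2APIC mode. */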
	if (reg == APIC_DFR)
		return 1;

	return kvm_lapic_msr_read(apic, reg, data);
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
}

int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;
	int ret;

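	/*
	 * Bit 0 of the MSR value is KVM_MSR_ENABLED; the remaining bits hold
	 * the 4-byte-aligned GPA of the guest's PV EOI flag.
	 */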
	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;
		else
			new_len = len;

		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
		if (ret)
			return ret;
	}

	vcpu->arch.pv_eoi.msr_val = data;

	return 0;
}

int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;

	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * Continue processing INIT/SIPI even if a nested VM-Exit
		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
		 * are blocked as a result of transitioning to VMX root mode.
		 */
	}

	/*
	 * INITs are blocked while CPU is in specific states (SMM, VMX root
	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
	 * wait-for-SIPI (WFS).
	 */
	if (!kvm_apic_init_sipi_allowed(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
	static_key_deferred_flush(&apic_sw_disabled);
	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}