// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
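
/*
 * Example: on a 32-bit build, mod_64(10, 3) expands to
 * 10 - 3 * div64_u64(10, 3) = 10 - 3 * 3 = 1, the same result the
 * native '%' operator produces on 64-bit builds, where u64 modulo
 * is available directly.
 */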

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* 14 is the version for Xeon and Pentium (see Intel SDM section 8.4.8). */
#define APIC_VERSION 0x14UL
#define LAPIC_MMIO_LENGTH (1 << 12)
/* the following defines are not in apicdef.h */
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
	*((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	__kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
	return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{
	__kvm_lapic_set_reg64(apic->regs, reg, val);
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}
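
/*
 * Worked example for the X2APIC case above: a logical dest_id of
 * 0x00030005 selects cluster 3 (upper 16 bits), giving a phys_map
 * offset of 3 * 16 = 48, and the low 16 bits form the in-cluster
 * bitmask 0x5, i.e. the vCPUs with x2APIC IDs 48 and 50.
 */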

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};

void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 max_id = 255; /* enough space for any xAPIC ID */

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
	 * (if clean) or the APIC registers (if dirty).
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
			   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
			   GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr;
		u8 xapic_id;
		u32 x2apic_id;

		if (!kvm_apic_present(vcpu))
			continue;

		xapic_id = kvm_xapic_id(apic);
		x2apic_id = kvm_x2apic_id(apic);

		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
				x2apic_id <= new->max_apic_id)
			new->phys_map[x2apic_id] = apic;
		/*
		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
		 * prevent them from masking VCPUs with APIC ID <= 0xff.
		 */
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;

		if (!kvm_apic_sw_enabled(apic))
			continue;

		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled)
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}
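
/*
 * Worked example: x2APIC ID 0x23 yields an LDR of
 * ((0x23 >> 4) << 16) | (1 << (0x23 & 0xf)) = (2 << 16) | (1 << 3)
 * = 0x20008, i.e. logical cluster 2, bit 3 within the cluster.
 */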

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
	return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = 0;

	if (!lapic_in_kernel(vcpu))
		return;

	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

	/*
	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
	 * implementation), which doesn't have an EOI register.  Some buggy
	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
	 * in the LAPIC without checking the IOAPIC version first, so
	 * level-triggered interrupts never get EOIed in the IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
		return;

	/* Initialize/mask any "new" LVT entries. */
	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

	apic->nr_lvt_entries = nr_lvt_entries;

	/* The number of LVT entries is reflected in the version register. */
	kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,	/* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}
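
/*
 * Example: if vector 49 (0x31) is the only bit set, the scan steps down
 * from vec = 224 in 32-vector chunks until it reaches vec = 32; that
 * register has bit 17 set (49 - 32), so the function returns
 * __fls(*reg) + vec = 17 + 32 = 49.
 */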

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		pir_val = READ_ONCE(pir[i]);
		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
		if (pir_val) {
			prev_irr_val = irr_val;
			irr_val |= xchg(&pir[i], 0);
			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
			if (prev_irr_val != irr_val) {
				max_updated_irr =
					__fls(irr_val ^ prev_irr_val) + vec;
			}
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
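
/*
 * Example: if pir[1] == 0x80 and the matching IRR word was clear, the
 * xchg() atomically claims the PIR word, IRR word 1 becomes 0x80, and
 * (assuming no higher vector is pending) both max_updated_irr and
 * *max_irr end up as __fls(0x80) + 32 = 39, so the function returns
 * true: the freshly posted vector is the new highest IRR.
 */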

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint.  It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	if (unlikely(apic->apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
							    apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an interrupt.
		 * The highest vector is injected. Thus the latest bit set matches
		 * the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * We do get here with APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/*
	 * This may race with the setting of IRR in __apic_accept_irq() and
	 * the value returned may be wrong, but kvm_vcpu_kick() in
	 * __apic_accept_irq() will cause a vmexit immediately and the value
	 * will be recalculated on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
				 irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
		return;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		return false;

	val &= KVM_PV_EOI_ENABLED;

	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
		return false;

	/*
	 * Clear the pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from a performance point of view,
	 * it makes sure pv eoi is only enabled when we know it's safe.
	 */
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;
	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	/*
	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
	 * were in x2APIC mode if the target APIC ID can't be encoded as an
	 * xAPIC ID.  This allows unique addressing of hotplugged vCPUs (which
	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
	 * mode.  Match the x2APIC ID if and only if the target APIC ID can't
	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
	 * changed its (addressable) xAPIC ID (which is writable).
	 */
	if (apic_x2apic_mode(apic) || mda > 0xff)
		return mda == kvm_x2apic_id(apic);

	return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}

/*
 * The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
			struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
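
/*
 * Example: hashing vector 97 (0x61) across 3 destination vCPUs gives
 * mod = 97 % 3 = 1, so the loop calls find_next_bit() twice and
 * returns the index of the second set bit in the bitmap.
 */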

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
				map->mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
				     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/*
 * Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {
			*r = 0;
			return true;
		}
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
						       trig_mode, vector);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		if (!kvm_inject_smi(vcpu)) {
			kvm_vcpu_kick(vcpu);
			result = 1;
		}
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

/*
 * This routine identifies the destination vCPUs meant to receive an
 * IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to
 * find the destination vCPU array and set the bitmap, or it traverses
 * each available vCPU to identify them one by one.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;
	int vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit set; one example
	 * is when the kernel checks the timer during setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (to_hv_vcpu(apic->vcpu) &&
	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
	struct kvm_lapic_irq irq;

	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}
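
/*
 * Example: with TMICT = 1,000,000 and half of the period remaining,
 * ns is half of period = TMICT * APIC_BUS_CYCLE_NS * divide_count, so
 * tmcct = ns / (APIC_BUS_CYCLE_NS * divide_count) = 500,000; the
 * current count scales linearly with the time left until
 * target_expiration.
 */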

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
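
/*
 * Example: APIC_SPIV is offset 0xf0, so APIC_REG_MASK(APIC_SPIV) is
 * 1ull << 15.  APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) with APIC_ISR at
 * 0x100 and APIC_ISR_NR == 8 multiplies that one-bit pattern into eight
 * consecutive bits (bits 16-23), covering the ISR registers at offsets
 * 0x100-0x170.
 */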

static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	u64 valid_reg_mask =
		APIC_REG_MASK(APIC_ID) |
		APIC_REG_MASK(APIC_LVR) |
		APIC_REG_MASK(APIC_TASKPRI) |
		APIC_REG_MASK(APIC_PROCPRI) |
		APIC_REG_MASK(APIC_LDR) |
		APIC_REG_MASK(APIC_DFR) |
		APIC_REG_MASK(APIC_SPIV) |
		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
		APIC_REG_MASK(APIC_ESR) |
		APIC_REG_MASK(APIC_ICR) |
		APIC_REG_MASK(APIC_LVTT) |
		APIC_REG_MASK(APIC_LVTTHMR) |
		APIC_REG_MASK(APIC_LVTPC) |
		APIC_REG_MASK(APIC_LVT0) |
		APIC_REG_MASK(APIC_LVT1) |
		APIC_REG_MASK(APIC_LVTERR) |
		APIC_REG_MASK(APIC_TMICT) |
		APIC_REG_MASK(APIC_TMCCT) |
		APIC_REG_MASK(APIC_TDCR);

	if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
		valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);

	/*
	 * ARBPRI and ICR2 are not valid in x2APIC mode.  WARN if KVM reads ICR
	 * in x2APIC mode as it's an 8-byte register in x2APIC and needs to be
	 * manually handled by the caller.
	 */
	if (!apic_x2apic_mode(apic))
		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
				  APIC_REG_MASK(APIC_ICR2);
	else
		WARN_ON_ONCE(offset == APIC_ICR);

	if (alignment + len > 4)
		return 1;

	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
		return 1;

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);
}
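
/*
 * Example: a TDCR value of 0xb (0b1011) gives tmp1 = 0xb,
 * tmp2 = (0x3 | (0x8 >> 1)) + 1 = 8, and divide_count = 1 << (8 & 0x7)
 * = 1, i.e. "divide by 1"; bits 0-1 and bit 3 of the TDCR select the
 * divisor.
 */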

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (apic->lapic_timer.period < min_period) {
			pr_info_ratelimited(
			    "kvm: vcpu %i: requested %lld ns "
			    "lapic timer period limited to %lld ns\n",
			    apic->vcpu->vcpu_id,
			    apic->lapic_timer.period, min_period);
			apic->lapic_timer.period = min_period;
		}
	}
}

static void cancel_hv_timer(struct kvm_lapic *apic);

static void cancel_apic_timer(struct kvm_lapic *apic)
{
	hrtimer_cancel(&apic->lapic_timer.timer);
	preempt_disable();
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	preempt_enable();
	atomic_set(&apic->lapic_timer.pending, 0);
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
				APIC_LVT_TIMER_TSCDEADLINE)) {
			cancel_apic_timer(apic);
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
			apic->lapic_timer.period = 0;
			apic->lapic_timer.tscdeadline = 0;
		}
		apic->lapic_timer.timer_mode = timer_mode;
		limit_periodic_timer_frequency(apic);
	}
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (apic->apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

	/*
	 * If the guest TSC is running at a different ratio than the host, then
	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
	 * always for VMX enabled hardware.
	 */
	if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
		__delay(min(guest_cycles,
			nsec_to_cycles(vcpu, timer_advance_ns)));
	} else {
		u64 delay_ns = guest_cycles * 1000000ULL;
		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
		ndelay(min_t(u32, delay_ns, timer_advance_ns));
	}
}
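
/*
 * Example of the scaled path above: 2,000,000 guest cycles at a
 * virtual_tsc_khz of 2,000,000 (a 2 GHz guest TSC) give
 * delay_ns = 2,000,000 * 1,000,000 / 2,000,000 = 1,000,000 ns (1 ms),
 * further capped by timer_advance_ns.
 */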

static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
					      s64 advance_expire_delta)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
	u64 ns;

	/* Do not adjust for tiny fluctuations or large random spikes. */
	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
		return;

	/* too early */
	if (advance_expire_delta < 0) {
		ns = -advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	} else {
		/* too late */
		ns = advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	}

	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
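
/*
 * Example: if the timer fired 800 guest cycles early on a 2,000,000 kHz
 * guest TSC, ns = 800 * 1,000,000 / 2,000,000 = 400, and the advance
 * shrinks by ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP = 400 / 8 = 50 ns;
 * stepping by one eighth damps oscillation around the true overhead.
 */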

static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	if (lapic_timer_advance_dynamic) {
		adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
		/*
		 * If the timer fired early, reread the TSC to account for the
		 * overhead of the above adjustment to avoid waiting longer
		 * than is necessary.
		 */
		if (guest_tsc < tsc_deadline)
			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	}

	if (guest_tsc < tsc_deadline)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
}

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);

static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	kvm_apic_local_deliver(apic, APIC_LVTT);
	if (apic_lvtt_tscdeadline(apic)) {
		ktimer->tscdeadline = 0;
	} else if (apic_lvtt_oneshot(apic)) {
		ktimer->tscdeadline = 0;
		ktimer->target_expiration = 0;
	}
}

static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
		ktimer->expired_tscdeadline = ktimer->tscdeadline;

	if (!from_timer_fn && apic->apicv_active) {
		WARN_ON(kvm_get_running_vcpu() != vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
		/*
		 * Ensure the guest's timer has truly expired before posting an
		 * interrupt.  Open code the relevant checks to avoid querying
		 * lapic_timer_int_injected(), which will be false since the
		 * interrupt isn't yet injected.  Waiting until after injecting
		 * is not an option since that won't help a posted interrupt.
		 */
		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
			__kvm_wait_lapic_expire(vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	atomic_inc(&apic->lapic_timer.pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	if (from_timer_fn)
		kvm_vcpu_kick(vcpu);
}

static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = ktime_get();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	ns = (tscdeadline - guest_tsc) * 1000000ULL;
	do_div(ns, this_tsc_khz);

	if (likely(tscdeadline > guest_tsc) &&
	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else
		apic_timer_expired(apic, false);

	local_irq_restore(flags);
}

static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
{
	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
}
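
/*
 * Example: TMICT = 100,000 with divide_count = 4 programs a period of
 * 100,000 * APIC_BUS_CYCLE_NS * 4 ns; assuming APIC_BUS_CYCLE_NS is 1,
 * as defined in lapic.h, that is 400,000 ns (0.4 ms).
 */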
1797
1798static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1799{
1800 ktime_t now, remaining;
1801 u64 ns_remaining_old, ns_remaining_new;
1802
1803 apic->lapic_timer.period =
1804 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1805 limit_periodic_timer_frequency(apic);
1806
1807 now = ktime_get();
1808 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1809 if (ktime_to_ns(remaining) < 0)
1810 remaining = 0;
1811
1812 ns_remaining_old = ktime_to_ns(remaining);
1813 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1814 apic->divide_count, old_divisor);
1815
1816 apic->lapic_timer.tscdeadline +=
1817 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1818 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1819 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1820}
1821
1822static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1823{
1824 ktime_t now;
1825 u64 tscl = rdtsc();
1826 s64 deadline;
1827
1828 now = ktime_get();
1829 apic->lapic_timer.period =
1830 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1831
1832 if (!apic->lapic_timer.period) {
1833 apic->lapic_timer.tscdeadline = 0;
1834 return false;
1835 }
1836
1837 limit_periodic_timer_frequency(apic);
1838 deadline = apic->lapic_timer.period;
1839
1840 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1841 if (unlikely(count_reg != APIC_TMICT)) {
1842 deadline = tmict_to_ns(apic,
1843 kvm_lapic_get_reg(apic, count_reg));
1844 if (unlikely(deadline <= 0))
1845 deadline = apic->lapic_timer.period;
1846 else if (unlikely(deadline > apic->lapic_timer.period)) {
1847 pr_info_ratelimited(
1848 "kvm: vcpu %i: requested lapic timer restore with "
1849 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1850 "Using initial count to start timer.\n",
1851 apic->vcpu->vcpu_id,
1852 count_reg,
1853 kvm_lapic_get_reg(apic, count_reg),
1854 deadline, apic->lapic_timer.period);
1855 kvm_lapic_set_reg(apic, count_reg, 0);
1856 deadline = apic->lapic_timer.period;
1857 }
1858 }
1859 }
1860
1861 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1862 nsec_to_cycles(apic->vcpu, deadline);
1863 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1864
1865 return true;
1866}
1867
1868static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1869{
1870 ktime_t now = ktime_get();
1871 u64 tscl = rdtsc();
1872 ktime_t delta;
1873
1874 /*
1875 * Synchronize both deadlines to the same time source or
1876 * differences in the periods (caused by differences in the
1877 * underlying clocks or numerical approximation errors) will
1878 * cause the two to drift apart over time as the errors
1879 * accumulate.
1880 */
1881 apic->lapic_timer.target_expiration =
1882 ktime_add_ns(apic->lapic_timer.target_expiration,
1883 apic->lapic_timer.period);
1884 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1885 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1886 nsec_to_cycles(apic->vcpu, delta);
1887}

static void start_sw_period(struct kvm_lapic *apic)
{
	if (!apic->lapic_timer.period)
		return;

	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {
		apic_timer_expired(apic, false);

		if (apic_lvtt_oneshot(apic))
			return;

		advance_periodic_target_expiration(apic);
	}

	hrtimer_start(&apic->lapic_timer.timer,
		apic->lapic_timer.target_expiration,
		HRTIMER_MODE_ABS_HARD);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return false;

	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}

static void cancel_hv_timer(struct kvm_lapic *apic)
{
	WARN_ON(preemptible());
	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

static bool start_hv_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	struct kvm_vcpu *vcpu = apic->vcpu;
	bool expired;

	WARN_ON(preemptible());
	if (!kvm_can_use_hv_timer(vcpu))
		return false;

	if (!ktimer->tscdeadline)
		return false;

	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
		return false;

	ktimer->hv_timer_in_use = true;
	hrtimer_cancel(&ktimer->timer);

	/*
	 * To simplify handling the periodic timer, leave the hv timer running
	 * even if the deadline timer has expired, i.e. rely on the resulting
	 * VM-Exit to recompute the periodic timer's target expiration.
	 */
	if (!apic_lvtt_period(apic)) {
		/*
		 * Cancel the hv timer if the sw timer fired while the hv timer
		 * was being programmed, or if the hv timer itself expired.
		 */
		if (atomic_read(&ktimer->pending)) {
			cancel_hv_timer(apic);
		} else if (expired) {
			apic_timer_expired(apic, false);
			cancel_hv_timer(apic);
		}
	}

	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);

	return true;
}

static void start_sw_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	WARN_ON(preemptible());
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
		return;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
		start_sw_period(apic);
	else if (apic_lvtt_tscdeadline(apic))
		start_sw_tscdeadline(apic);
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}

static void restart_apic_timer(struct kvm_lapic *apic)
{
	preempt_disable();

	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
		goto out;

	if (!start_hv_timer(apic))
		start_sw_timer(apic);
out:
	preempt_enable();
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* If the preempt notifier has already run, it also called apic_timer_expired */
	if (!apic->lapic_timer.hv_timer_in_use)
		goto out;
	WARN_ON(kvm_vcpu_is_blocking(vcpu));
	apic_timer_expired(apic, false);
	cancel_hv_timer(apic);

	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		advance_periodic_target_expiration(apic);
		restart_apic_timer(apic);
	}
out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	restart_apic_timer(vcpu->arch.apic);
}

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* Possibly the TSC deadline timer is not enabled yet */
	if (apic->lapic_timer.hv_timer_in_use)
		start_sw_timer(apic);
	preempt_enable();
}

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	restart_apic_timer(apic);
}

static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
{
	atomic_set(&apic->lapic_timer.pending, 0);

	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
	    && !set_target_expiration(apic, count_reg))
		return;

	restart_apic_timer(apic);
}

static void start_apic_timer(struct kvm_lapic *apic)
{
	__start_apic_timer(apic, APIC_TMICT);
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}

static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
{
	struct kvm *kvm = apic->vcpu->kvm;

	if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
		return;

	if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
		return;

	kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
}

static int get_lvt_index(u32 reg)
{
	if (reg == APIC_LVTCMCI)
		return LVT_CMCI;
	if (reg < APIC_LVTT || reg > APIC_LVTERR)
		return -1;
	return array_index_nospec(
			(reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
}

static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic)) {
			kvm_apic_set_xapic_id(apic, val >> 24);
			kvm_lapic_xapic_id_updated(apic);
		} else {
			ret = 1;
		}
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
		else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;

			for (i = 0; i < apic->nr_lvt_entries; i++) {
				kvm_lapic_set_reg(apic, APIC_LVTx(i),
					kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);

		}
		break;
	}
	case APIC_ICR:
		WARN_ON_ONCE(apic_x2apic_mode(apic));

		/* No delay here, so we always clear the pending bit */
		val &= ~APIC_ICR_BUSY;
		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
		kvm_lapic_set_reg(apic, APIC_ICR, val);
		break;
	case APIC_ICR2:
		if (apic_x2apic_mode(apic))
			ret = 1;
		else
			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		fallthrough;
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_LVTCMCI: {
		u32 index = get_lvt_index(reg);
		if (!kvm_lapic_lvt_supported(apic, index)) {
			ret = 1;
			break;
		}
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);
		break;
	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		cancel_apic_timer(apic);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR: {
		uint32_t old_divisor = apic->divide_count;

		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
				apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			update_target_expiration(apic, old_divisor);
			restart_apic_timer(apic);
		}
		break;
	}
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0)
			ret = 1;
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic))
			kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
		else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}

	/*
	 * Recalculate APIC maps if necessary, e.g. if the software enable bit
	 * was toggled, the APIC ID changed, etc...  The maps are marked dirty
	 * on relevant changes, i.e. this is a nop for most writes.
	 */
	kvm_recalculate_apic_map(apic->vcpu->kvm);

	return ret;
}

static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers must be aligned on a 128-bit boundary.
	 * 32/64/128-bit registers must be accessed through 32-bit reads and
	 * writes. See SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf))
		return 0;

	val = *(u32*)data;

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}
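
/*
 * Illustrative example of the filtering above (hypothetical guest
 * accesses): a 4-byte write to base + 0x300 reaches
 * kvm_lapic_reg_write() as APIC_ICR, whereas a 2-byte write, or a
 * 4-byte write to the unaligned offset 0x302, is silently dropped.
 */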

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 val;

	if (apic_x2apic_mode(apic)) {
		if (KVM_BUG_ON(kvm_lapic_msr_read(apic, offset, &val), vcpu->kvm))
			return;
	} else {
		val = kvm_lapic_get_reg(apic, offset);
	}

	/*
	 * ICR is a single 64-bit register when x2APIC is enabled.  For legacy
	 * xAPIC, ICR writes need to go down the common (slightly slower) path
	 * to get the upper half from ICR2.
	 */
	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
		trace_kvm_apic_write(APIC_ICR, val);
	} else {
		/* TODO: optimize to just emulate side effect w/o one more write */
		kvm_lapic_reg_write(apic, offset, (u32)val);
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_branch_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_branch_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}
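
/*
 * Worked example of the CR8 <-> TPR mapping above: architecturally CR8
 * holds bits 7:4 of the 8-bit TPR, so a guest that loads CR8 with 9
 * ends up with TPR 0x90, and reading CR8 back while TPR is 0x9f
 * returns 9 (the low nibble is not visible through CR8).
 */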

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
		kvm_vcpu_update_apicv(vcpu);
		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE) {
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
	}
}

void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic->apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		/*
		 * Don't clear irr_pending, searching the IRR can race with
		 * updates from the CPU as APICv is still active from hardware's
		 * perspective.  The flag will be cleared as appropriate when
		 * KVM injects the interrupt.
		 */
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 msr_val;
	int i;

	if (!init_event) {
		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			msr_val |= MSR_IA32_APICBASE_BSP;
		kvm_lapic_set_base(vcpu, msr_val);
	}

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
	if (!init_event)
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < apic->nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	if (!apic_x2apic_mode(apic)) {
		kvm_lapic_set_reg(apic, APIC_ICR, 0);
		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	} else {
		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
	}
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);

	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read = apic_mmio_read,
	.write = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "kvm: failed to allocate APIC register page for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	/*
	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		return 1;
	return 0;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
		u64 icr;

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/*
		 * In x2APIC mode, the LDR is fixed and based on the id.  And
		 * ICR is internally a single 64-bit register, but needs to be
		 * split to ICR+ICR2 in userspace for backwards compatibility.
		 */
		if (set) {
			*ldr = kvm_apic_calc_x2apic_ldr(*id);

			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
		} else {
			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
		}
	}

	return 0;
}
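
/*
 * Worked example of the fixed x2APIC LDR derivation relied on above
 * (logical cluster ID in bits 31:16, one-hot position within the
 * cluster in bits 15:0): x2APIC ID 5 yields LDR 0x00000020, and ID 35
 * (cluster 2, position 3) yields LDR 0x00020008.
 */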

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	if (!apic_x2apic_mode(apic))
		kvm_lapic_xapic_id_updated(apic);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	cancel_apic_timer(apic);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
		kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));

	if (pv_eoi_test_and_clr_pending(vcpu))
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
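
/*
 * Illustrative walk-through of the PV EOI state machine above
 * (hypothetical sequence): the host arms PV EOI before entry
 * (PENDING=1, ENABLED=1); a guest that EOIs via the paravirt path
 * clears ENABLED in guest memory, so on the next exit
 * pv_eoi_test_and_clr_pending() finds ENABLED already clear and this
 * function performs apic_set_eoi() on the guest's behalf, sparing the
 * guest an EOI MSR/MMIO exit.
 */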

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{
	data &= ~APIC_ICR_BUSY;

	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
	trace_kvm_apic_write(APIC_ICR, data);
	return 0;
}

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
	u32 low;

	if (reg == APIC_ICR) {
		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
		return 0;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;

	*data = low;

	return 0;
}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{
	/*
	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such; all other registers remain accessible only
	 * through 32-bit reads/writes.
	 */
	if (reg == APIC_ICR)
		return kvm_x2apic_icr_write(apic, data);

	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_write(apic, reg, data);
}
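
/*
 * Worked example of the MSR-to-register-offset mapping above: the
 * x2APIC MSR range starts at APIC_BASE_MSR (0x800), so a WRMSR to
 * 0x830 (the x2APIC ICR) yields reg = (0x830 - 0x800) << 4 = 0x300 =
 * APIC_ICR, and 0x808 (the TPR) yields 0x80 = APIC_TASKPRI.
 */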

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR)
		return 1;

	return kvm_lapic_msr_read(apic, reg, data);
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
}

int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;
	int ret;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;
		else
			new_len = len;

		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
		if (ret)
			return ret;
	}

	vcpu->arch.pv_eoi.msr_val = data;

	return 0;
}

int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;

	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * Continue processing INIT/SIPI even if a nested VM-Exit
		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
		 * are blocked as a result of transitioning to VMX root mode.
		 */
	}

	/*
	 * INITs are blocked while CPU is in specific states (SMM, VMX root
	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
	 * wait-for-SIPI (WFS).
	 */
	if (!kvm_apic_init_sipi_allowed(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
	static_key_deferred_flush(&apic_sw_disabled);
	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}
1
2/*
3 * Local APIC virtualization
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel
8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Dor Laor <dor.laor@qumranet.com>
12 * Gregory Haskins <ghaskins@novell.com>
13 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
14 *
15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 */
20
21#include <linux/kvm_host.h>
22#include <linux/kvm.h>
23#include <linux/mm.h>
24#include <linux/highmem.h>
25#include <linux/smp.h>
26#include <linux/hrtimer.h>
27#include <linux/io.h>
28#include <linux/export.h>
29#include <linux/math64.h>
30#include <linux/slab.h>
31#include <asm/processor.h>
32#include <asm/msr.h>
33#include <asm/page.h>
34#include <asm/current.h>
35#include <asm/apicdef.h>
36#include <asm/delay.h>
37#include <linux/atomic.h>
38#include <linux/jump_label.h>
39#include "kvm_cache_regs.h"
40#include "irq.h"
41#include "trace.h"
42#include "x86.h"
43#include "cpuid.h"
44#include "hyperv.h"
45
46#ifndef CONFIG_X86_64
47#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
48#else
49#define mod_64(x, y) ((x) % (y))
50#endif
51
52#define PRId64 "d"
53#define PRIx64 "llx"
54#define PRIu64 "u"
55#define PRIo64 "o"
56
57#define APIC_BUS_CYCLE_NS 1
58
59/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
60#define apic_debug(fmt, arg...)
61
62/* 14 is the version for Xeon and Pentium 8.4.8*/
63#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
64#define LAPIC_MMIO_LENGTH (1 << 12)
65/* followed define is not in apicdef.h */
66#define APIC_SHORT_MASK 0xc0000
67#define APIC_DEST_NOSHORT 0x0
68#define APIC_DEST_MASK 0x800
69#define MAX_APIC_VECTOR 256
70#define APIC_VECTORS_PER_REG 32
71
72#define APIC_BROADCAST 0xFF
73#define X2APIC_BROADCAST 0xFFFFFFFFul
74
75static inline int apic_test_vector(int vec, void *bitmap)
76{
77 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
78}
79
80bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
81{
82 struct kvm_lapic *apic = vcpu->arch.apic;
83
84 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
85 apic_test_vector(vector, apic->regs + APIC_IRR);
86}
87
88static inline void apic_clear_vector(int vec, void *bitmap)
89{
90 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
91}
92
93static inline int __apic_test_and_set_vector(int vec, void *bitmap)
94{
95 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
96}
97
98static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
99{
100 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
101}
102
103struct static_key_deferred apic_hw_disabled __read_mostly;
104struct static_key_deferred apic_sw_disabled __read_mostly;
105
106static inline int apic_enabled(struct kvm_lapic *apic)
107{
108 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
109}
110
111#define LVT_MASK \
112 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
113
114#define LINT_MASK \
115 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
116 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
117
118static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
119 u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
120 switch (map->mode) {
121 case KVM_APIC_MODE_X2APIC: {
122 u32 offset = (dest_id >> 16) * 16;
123 u32 max_apic_id = map->max_apic_id;
124
125 if (offset <= max_apic_id) {
126 u8 cluster_size = min(max_apic_id - offset + 1, 16U);
127
128 *cluster = &map->phys_map[offset];
129 *mask = dest_id & (0xffff >> (16 - cluster_size));
130 } else {
131 *mask = 0;
132 }
133
134 return true;
135 }
136 case KVM_APIC_MODE_XAPIC_FLAT:
137 *cluster = map->xapic_flat_map;
138 *mask = dest_id & 0xff;
139 return true;
140 case KVM_APIC_MODE_XAPIC_CLUSTER:
141 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
142 *mask = dest_id & 0xf;
143 return true;
144 default:
145 /* Not optimized. */
146 return false;
147 }
148}
149
150static void kvm_apic_map_free(struct rcu_head *rcu)
151{
152 struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
153
154 kvfree(map);
155}
156
157static void recalculate_apic_map(struct kvm *kvm)
158{
159 struct kvm_apic_map *new, *old = NULL;
160 struct kvm_vcpu *vcpu;
161 int i;
162 u32 max_id = 255;
163
164 mutex_lock(&kvm->arch.apic_map_lock);
165
166 kvm_for_each_vcpu(i, vcpu, kvm)
167 if (kvm_apic_present(vcpu))
168 max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));
169
170 new = kvm_kvzalloc(sizeof(struct kvm_apic_map) +
171 sizeof(struct kvm_lapic *) * ((u64)max_id + 1));
172
173 if (!new)
174 goto out;
175
176 new->max_apic_id = max_id;
177
178 kvm_for_each_vcpu(i, vcpu, kvm) {
179 struct kvm_lapic *apic = vcpu->arch.apic;
180 struct kvm_lapic **cluster;
181 u16 mask;
182 u32 ldr, aid;
183
184 if (!kvm_apic_present(vcpu))
185 continue;
186
187 aid = kvm_apic_id(apic);
188 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
189
190 if (aid <= new->max_apic_id)
191 new->phys_map[aid] = apic;
192
193 if (apic_x2apic_mode(apic)) {
194 new->mode |= KVM_APIC_MODE_X2APIC;
195 } else if (ldr) {
196 ldr = GET_APIC_LOGICAL_ID(ldr);
197 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
198 new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
199 else
200 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
201 }
202
203 if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
204 continue;
205
206 if (mask)
207 cluster[ffs(mask) - 1] = apic;
208 }
209out:
210 old = rcu_dereference_protected(kvm->arch.apic_map,
211 lockdep_is_held(&kvm->arch.apic_map_lock));
212 rcu_assign_pointer(kvm->arch.apic_map, new);
213 mutex_unlock(&kvm->arch.apic_map_lock);
214
215 if (old)
216 call_rcu(&old->rcu, kvm_apic_map_free);
217
218 kvm_make_scan_ioapic_request(kvm);
219}
220
221static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
222{
223 bool enabled = val & APIC_SPIV_APIC_ENABLED;
224
225 kvm_lapic_set_reg(apic, APIC_SPIV, val);
226
227 if (enabled != apic->sw_enabled) {
228 apic->sw_enabled = enabled;
229 if (enabled) {
230 static_key_slow_dec_deferred(&apic_sw_disabled);
231 recalculate_apic_map(apic->vcpu->kvm);
232 } else
233 static_key_slow_inc(&apic_sw_disabled.key);
234 }
235}
236
237static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
238{
239 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
240 recalculate_apic_map(apic->vcpu->kvm);
241}
242
243static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
244{
245 kvm_lapic_set_reg(apic, APIC_LDR, id);
246 recalculate_apic_map(apic->vcpu->kvm);
247}
248
249static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
250{
251 u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
252
253 kvm_lapic_set_reg(apic, APIC_ID, id);
254 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
255 recalculate_apic_map(apic->vcpu->kvm);
256}
257
258static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
259{
260 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
261}
262
263static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
264{
265 return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
266}
267
268static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
269{
270 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
271}
272
273static inline int apic_lvtt_period(struct kvm_lapic *apic)
274{
275 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
276}
277
278static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
279{
280 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
281}
282
283static inline int apic_lvt_nmi_mode(u32 lvt_val)
284{
285 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
286}
287
288void kvm_apic_set_version(struct kvm_vcpu *vcpu)
289{
290 struct kvm_lapic *apic = vcpu->arch.apic;
291 struct kvm_cpuid_entry2 *feat;
292 u32 v = APIC_VERSION;
293
294 if (!lapic_in_kernel(vcpu))
295 return;
296
297 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
298 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
299 v |= APIC_LVR_DIRECTED_EOI;
300 kvm_lapic_set_reg(apic, APIC_LVR, v);
301}
302
303static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
304 LVT_MASK , /* part LVTT mask, timer mode mask added at runtime */
305 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
306 LVT_MASK | APIC_MODE_MASK, /* LVTPC */
307 LINT_MASK, LINT_MASK, /* LVT0-1 */
308 LVT_MASK /* LVTERR */
309};
310
311static int find_highest_vector(void *bitmap)
312{
313 int vec;
314 u32 *reg;
315
316 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
317 vec >= 0; vec -= APIC_VECTORS_PER_REG) {
318 reg = bitmap + REG_POS(vec);
319 if (*reg)
320 return fls(*reg) - 1 + vec;
321 }
322
323 return -1;
324}
325
326static u8 count_vectors(void *bitmap)
327{
328 int vec;
329 u32 *reg;
330 u8 count = 0;
331
332 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
333 reg = bitmap + REG_POS(vec);
334 count += hweight32(*reg);
335 }
336
337 return count;
338}
339
340void __kvm_apic_update_irr(u32 *pir, void *regs)
341{
342 u32 i, pir_val;
343
344 for (i = 0; i <= 7; i++) {
345 pir_val = READ_ONCE(pir[i]);
346 if (pir_val) {
347 pir_val = xchg(&pir[i], 0);
348 *((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
349 }
350 }
351}
352EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
353
354void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
355{
356 struct kvm_lapic *apic = vcpu->arch.apic;
357
358 __kvm_apic_update_irr(pir, apic->regs);
359
360 kvm_make_request(KVM_REQ_EVENT, vcpu);
361}
362EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
363
364static inline int apic_search_irr(struct kvm_lapic *apic)
365{
366 return find_highest_vector(apic->regs + APIC_IRR);
367}
368
369static inline int apic_find_highest_irr(struct kvm_lapic *apic)
370{
371 int result;
372
373 /*
374 * Note that irr_pending is just a hint. It will be always
375 * true with virtual interrupt delivery enabled.
376 */
377 if (!apic->irr_pending)
378 return -1;
379
380 if (apic->vcpu->arch.apicv_active)
381 kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
382 result = apic_search_irr(apic);
383 ASSERT(result == -1 || result >= 16);
384
385 return result;
386}
387
388static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
389{
390 struct kvm_vcpu *vcpu;
391
392 vcpu = apic->vcpu;
393
394 if (unlikely(vcpu->arch.apicv_active)) {
395 /* try to update RVI */
396 apic_clear_vector(vec, apic->regs + APIC_IRR);
397 kvm_make_request(KVM_REQ_EVENT, vcpu);
398 } else {
399 apic->irr_pending = false;
400 apic_clear_vector(vec, apic->regs + APIC_IRR);
401 if (apic_search_irr(apic) != -1)
402 apic->irr_pending = true;
403 }
404}
405
406static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
407{
408 struct kvm_vcpu *vcpu;
409
410 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
411 return;
412
413 vcpu = apic->vcpu;
414
415 /*
416 * With APIC virtualization enabled, all caching is disabled
417 * because the processor can modify ISR under the hood. Instead
418 * just set SVI.
419 */
420 if (unlikely(vcpu->arch.apicv_active))
421 kvm_x86_ops->hwapic_isr_update(vcpu, vec);
422 else {
423 ++apic->isr_count;
424 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
425 /*
426 * ISR (in service register) bit is set when injecting an interrupt.
427 * The highest vector is injected. Thus the latest bit set matches
428 * the highest bit in ISR.
429 */
430 apic->highest_isr_cache = vec;
431 }
432}
433
434static inline int apic_find_highest_isr(struct kvm_lapic *apic)
435{
436 int result;
437
438 /*
439 * Note that isr_count is always 1, and highest_isr_cache
440 * is always -1, with APIC virtualization enabled.
441 */
442 if (!apic->isr_count)
443 return -1;
444 if (likely(apic->highest_isr_cache != -1))
445 return apic->highest_isr_cache;
446
447 result = find_highest_vector(apic->regs + APIC_ISR);
448 ASSERT(result == -1 || result >= 16);
449
450 return result;
451}
452
453static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
454{
455 struct kvm_vcpu *vcpu;
456 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
457 return;
458
459 vcpu = apic->vcpu;
460
461 /*
462 * We do get here for APIC virtualization enabled if the guest
463 * uses the Hyper-V APIC enlightenment. In this case we may need
464 * to trigger a new interrupt delivery by writing the SVI field;
465 * on the other hand isr_count and highest_isr_cache are unused
466 * and must be left alone.
467 */
468 if (unlikely(vcpu->arch.apicv_active))
469 kvm_x86_ops->hwapic_isr_update(vcpu,
470 apic_find_highest_isr(apic));
471 else {
472 --apic->isr_count;
473 BUG_ON(apic->isr_count < 0);
474 apic->highest_isr_cache = -1;
475 }
476}
477
478int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
479{
480 /* This may race with setting of irr in __apic_accept_irq() and
481 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
482 * will cause vmexit immediately and the value will be recalculated
483 * on the next vmentry.
484 */
485 return apic_find_highest_irr(vcpu->arch.apic);
486}
487
488static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
489 int vector, int level, int trig_mode,
490 struct dest_map *dest_map);
491
492int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
493 struct dest_map *dest_map)
494{
495 struct kvm_lapic *apic = vcpu->arch.apic;
496
497 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
498 irq->level, irq->trig_mode, dest_map);
499}
500
501static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
502{
503
504 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
505 sizeof(val));
506}
507
508static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
509{
510
511 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
512 sizeof(*val));
513}
514
515static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
516{
517 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
518}
519
520static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
521{
522 u8 val;
523 if (pv_eoi_get_user(vcpu, &val) < 0)
524 apic_debug("Can't read EOI MSR value: 0x%llx\n",
525 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
526 return val & 0x1;
527}
528
529static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
530{
531 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
532 apic_debug("Can't set EOI MSR value: 0x%llx\n",
533 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
534 return;
535 }
536 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
537}
538
539static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
540{
541 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
542 apic_debug("Can't clear EOI MSR value: 0x%llx\n",
543 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
544 return;
545 }
546 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
547}
548
549static void apic_update_ppr(struct kvm_lapic *apic)
550{
551 u32 tpr, isrv, ppr, old_ppr;
552 int isr;
553
554 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
555 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
556 isr = apic_find_highest_isr(apic);
557 isrv = (isr != -1) ? isr : 0;
558
559 if ((tpr & 0xf0) >= (isrv & 0xf0))
560 ppr = tpr & 0xff;
561 else
562 ppr = isrv & 0xf0;
563
564 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
565 apic, ppr, isr, isrv);
566
567 if (old_ppr != ppr) {
568 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
569 if (ppr < old_ppr)
570 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
571 }
572}
573
574static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
575{
576 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
577 apic_update_ppr(apic);
578}
579
580static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
581{
582 if (apic_x2apic_mode(apic))
583 return mda == X2APIC_BROADCAST;
584
585 return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
586}
587
588static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
589{
590 if (kvm_apic_broadcast(apic, mda))
591 return true;
592
593 if (apic_x2apic_mode(apic))
594 return mda == kvm_apic_id(apic);
595
596 return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
597}
598
599static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
600{
601 u32 logical_id;
602
603 if (kvm_apic_broadcast(apic, mda))
604 return true;
605
606 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
607
608 if (apic_x2apic_mode(apic))
609 return ((logical_id >> 16) == (mda >> 16))
610 && (logical_id & mda & 0xffff) != 0;
611
612 logical_id = GET_APIC_LOGICAL_ID(logical_id);
613 mda = GET_APIC_DEST_FIELD(mda);
614
615 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
616 case APIC_DFR_FLAT:
617 return (logical_id & mda) != 0;
618 case APIC_DFR_CLUSTER:
619 return ((logical_id >> 4) == (mda >> 4))
620 && (logical_id & mda & 0xf) != 0;
621 default:
622 apic_debug("Bad DFR vcpu %d: %08x\n",
623 apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
624 return false;
625 }
626}
627
628/* The KVM local APIC implementation has two quirks:
629 *
630 * - the xAPIC MDA stores the destination at bits 24-31, while this
631 * is not true of struct kvm_lapic_irq's dest_id field. This is
632 * just a quirk in the API and is not problematic.
633 *
634 * - in-kernel IOAPIC messages have to be delivered directly to
635 * x2APIC, because the kernel does not support interrupt remapping.
636 * In order to support broadcast without interrupt remapping, x2APIC
637 * rewrites the destination of non-IPI messages from APIC_BROADCAST
638 * to X2APIC_BROADCAST.
639 *
640 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
641 * important when userspace wants to use x2APIC-format MSIs, because
642 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
643 */
644static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
645 struct kvm_lapic *source, struct kvm_lapic *target)
646{
647 bool ipi = source != NULL;
648 bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
649
650 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
651 !ipi && dest_id == APIC_BROADCAST && x2apic_mda)
652 return X2APIC_BROADCAST;
653
654 return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
655}
656
657bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
658 int short_hand, unsigned int dest, int dest_mode)
659{
660 struct kvm_lapic *target = vcpu->arch.apic;
661 u32 mda = kvm_apic_mda(vcpu, dest, source, target);
662
663 apic_debug("target %p, source %p, dest 0x%x, "
664 "dest_mode 0x%x, short_hand 0x%x\n",
665 target, source, dest, dest_mode, short_hand);
666
667 ASSERT(target);
668 switch (short_hand) {
669 case APIC_DEST_NOSHORT:
670 if (dest_mode == APIC_DEST_PHYSICAL)
671 return kvm_apic_match_physical_addr(target, mda);
672 else
673 return kvm_apic_match_logical_addr(target, mda);
674 case APIC_DEST_SELF:
675 return target == source;
676 case APIC_DEST_ALLINC:
677 return true;
678 case APIC_DEST_ALLBUT:
679 return target != source;
680 default:
681 apic_debug("kvm: apic: Bad dest shorthand value %x\n",
682 short_hand);
683 return false;
684 }
685}
686EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
687
688int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
689 const unsigned long *bitmap, u32 bitmap_size)
690{
691 u32 mod;
692 int i, idx = -1;
693
694 mod = vector % dest_vcpus;
695
696 for (i = 0; i <= mod; i++) {
697 idx = find_next_bit(bitmap, bitmap_size, idx + 1);
698 BUG_ON(idx == bitmap_size);
699 }
700
701 return idx;
702}
703
704static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
705{
706 if (!kvm->arch.disabled_lapic_found) {
707 kvm->arch.disabled_lapic_found = true;
708 printk(KERN_INFO
709 "Disabled LAPIC found during irq injection\n");
710 }
711}
712
713static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
714 struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
715{
716 if (kvm->arch.x2apic_broadcast_quirk_disabled) {
717 if ((irq->dest_id == APIC_BROADCAST &&
718 map->mode != KVM_APIC_MODE_X2APIC))
719 return true;
720 if (irq->dest_id == X2APIC_BROADCAST)
721 return true;
722 } else {
723 bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
724 if (irq->dest_id == (x2apic_ipi ?
725 X2APIC_BROADCAST : APIC_BROADCAST))
726 return true;
727 }
728
729 return false;
730}
731
732/* Return true if the interrupt can be handled by using *bitmap as index mask
733 * for valid destinations in *dst array.
734 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
735 * Note: we may have zero kvm_lapic destinations when we return true, which
736 * means that the interrupt should be dropped. In this case, *bitmap would be
737 * zero and *dst undefined.
738 */
739static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
740 struct kvm_lapic **src, struct kvm_lapic_irq *irq,
741 struct kvm_apic_map *map, struct kvm_lapic ***dst,
742 unsigned long *bitmap)
743{
744 int i, lowest;
745
746 if (irq->shorthand == APIC_DEST_SELF && src) {
747 *dst = src;
748 *bitmap = 1;
749 return true;
750 } else if (irq->shorthand)
751 return false;
752
753 if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
754 return false;
755
756 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
757 if (irq->dest_id > map->max_apic_id) {
758 *bitmap = 0;
759 } else {
760 *dst = &map->phys_map[irq->dest_id];
761 *bitmap = 1;
762 }
763 return true;
764 }
765
766 *bitmap = 0;
767 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
768 (u16 *)bitmap))
769 return false;
770
771 if (!kvm_lowest_prio_delivery(irq))
772 return true;
773
774 if (!kvm_vector_hashing_enabled()) {
775 lowest = -1;
776 for_each_set_bit(i, bitmap, 16) {
777 if (!(*dst)[i])
778 continue;
779 if (lowest < 0)
780 lowest = i;
781 else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
782 (*dst)[lowest]->vcpu) < 0)
783 lowest = i;
784 }
785 } else {
786 if (!*bitmap)
787 return true;
788
789 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
790 bitmap, 16);
791
792 if (!(*dst)[lowest]) {
793 kvm_apic_disabled_lapic_found(kvm);
794 *bitmap = 0;
795 return true;
796 }
797 }
798
799 *bitmap = (lowest >= 0) ? 1 << lowest : 0;
800
801 return true;
802}
803
804bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
805 struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
806{
807 struct kvm_apic_map *map;
808 unsigned long bitmap;
809 struct kvm_lapic **dst = NULL;
810 int i;
811 bool ret;
812
813 *r = -1;
814
815 if (irq->shorthand == APIC_DEST_SELF) {
816 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
817 return true;
818 }
819
820 rcu_read_lock();
821 map = rcu_dereference(kvm->arch.apic_map);
822
823 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
824 if (ret)
825 for_each_set_bit(i, &bitmap, 16) {
826 if (!dst[i])
827 continue;
828 if (*r < 0)
829 *r = 0;
830 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
831 }
832
833 rcu_read_unlock();
834 return ret;
835}
836
837/*
838 * This routine tries to handler interrupts in posted mode, here is how
839 * it deals with different cases:
840 * - For single-destination interrupts, handle it in posted mode
841 * - Else if vector hashing is enabled and it is a lowest-priority
842 * interrupt, handle it in posted mode and use the following mechanism
843 * to find the destinaiton vCPU.
844 * 1. For lowest-priority interrupts, store all the possible
845 * destination vCPUs in an array.
846 * 2. Use "guest vector % max number of destination vCPUs" to find
847 * the right destination vCPU in the array for the lowest-priority
848 * interrupt.
849 * - Otherwise, use remapped mode to inject the interrupt.
850 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
                        hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                /* fall through */
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
                        else
                                apic_clear_vector(vector, apic->regs + APIC_TMR);
                }

                if (vcpu->arch.apicv_active)
                        kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
                else {
                        kvm_lapic_set_irr(vector, apic);

                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                result = 1;
                kvm_make_request(KVM_REQ_SMI, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        /*
                         * Make sure pending_events is visible before sending
                         * the request.
                         */
                        smp_wmb();
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                } else {
                        apic_debug("Ignoring de-assert INIT to vcpu %d\n",
                                   vcpu->vcpu_id);
                }
                break;

        case APIC_DM_STARTUP:
                apic_debug("SIPI to vcpu %d vector 0x%02x\n",
                           vcpu->vcpu_id, vector);
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* Eoi the ioapic only if the ioapic doesn't own the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every write to EOI will have a corresponding ISR,
         * one example is when the kernel checks the timer in setup_IO_APIC.
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

static void apic_send_ipi(struct kvm_lapic *apic)
{
        u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
        u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
        struct kvm_lapic_irq irq;

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        apic_debug("icr_high 0x%x, icr_low 0x%x, "
                   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
                   "msi_redir_hint 0x%x\n",
                   icr_high, icr_low, irq.shorthand, irq.dest_id,
                   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
                   irq.vector, irq.msi_redir_hint);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;
        u32 tmcct;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
                apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        tmcct = div64_u64(ns,
                         (APIC_BUS_CYCLE_NS * apic->divide_count));

        return tmcct;
}
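
/*
 * Illustrative note, not from the original source: TMCCT counts down from
 * TMICT once every (APIC_BUS_CYCLE_NS * divide_count) nanoseconds, so the
 * value returned above is remaining_ns / (APIC_BUS_CYCLE_NS * divide_count).
 * Worked example under assumed values APIC_BUS_CYCLE_NS == 1 and
 * divide_count == 16: with 32000 ns left until target_expiration the guest
 * reads tmcct = 32000 / (1 * 16) = 2000 remaining timer ticks.
 */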

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                apic_debug("Access APIC ARBPRI register which is for P6\n");
                break;

        case APIC_TMCCT: /* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                /* fall through */
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;
        /* this bitmask has a bit cleared for each reserved register */
        static const u64 rmask = 0x43ff01ffffffe70cULL;

        if ((alignment + len) > 4) {
                apic_debug("KVM_APIC_READ: alignment error %x %d\n",
                           offset, len);
                return 1;
        }

        if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
                apic_debug("KVM_APIC_READ: read reserved register %x\n",
                           offset);
                return 1;
        }

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
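
/*
 * Illustrative note, not from the original source: each bit of rmask above
 * guards one 16-byte register slot, indexed by (offset >> 4).  E.g. reading
 * APIC_TASKPRI at offset 0x80 tests bit 0x80 >> 4 = 8, which is set in
 * 0x43ff01ffffffe70cULL, so the read proceeds; offset 0x40 tests bit 4,
 * which is clear, so the access is rejected as a reserved register.
 */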

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return kvm_apic_hw_enabled(apic) &&
            addr >= apic->base_address &&
            addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->divide_count = 0x1 << (tmp2 & 0x7);

        apic_debug("timer divide count is 0x%x\n",
                   apic->divide_count);
}
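
/*
 * Illustrative note, not from the original source: the TDCR divide value
 * lives in bits 3, 1 and 0; the code folds bit 3 down next to bits 1:0 and
 * adds one to get a power-of-two exponent (mod 8).  E.g. TDCR = 0x3 gives
 * tmp2 = (0b011 | 0b000) + 1 = 4, so divide_count = 1 << 4 = 16, while
 * TDCR = 0xb (0b1011) gives (0b011 | 0b100) + 1 = 8, and 8 & 0x7 = 0, so
 * divide_count = 1 << 0 = 1, matching the SDM's divide-by-1 encoding.
 */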

static void apic_update_lvtt(struct kvm_lapic *apic)
{
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
                        apic->lapic_timer.timer_mode_mask;

        if (apic->lapic_timer.timer_mode != timer_mode) {
                apic->lapic_timer.timer_mode = timer_mode;
                hrtimer_cancel(&apic->lapic_timer.timer);
        }
}

static void apic_timer_expired(struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct swait_queue_head *q = &vcpu->wq;
        struct kvm_timer *ktimer = &apic->lapic_timer;

        if (atomic_read(&apic->lapic_timer.pending))
                return;

        atomic_inc(&apic->lapic_timer.pending);
        kvm_set_pending_timer(vcpu);

        if (swait_active(q))
                swake_up(q);

        if (apic_lvtt_tscdeadline(apic))
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

        if (kvm_apic_hw_enabled(apic)) {
                int vec = reg & APIC_VECTOR_MASK;
                void *bitmap = apic->regs + APIC_ISR;

                if (vcpu->arch.apicv_active)
                        bitmap = apic->regs + APIC_IRR;

                if (apic_test_vector(vec, bitmap))
                        return true;
        }
        return false;
}

void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 guest_tsc, tsc_deadline;

        if (!lapic_in_kernel(vcpu))
                return;

        if (apic->lapic_timer.expired_tscdeadline == 0)
                return;

        if (!lapic_timer_int_injected(vcpu))
                return;

        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

        /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
        if (guest_tsc < tsc_deadline)
                __delay(min(tsc_deadline - guest_tsc,
                        nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}

static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
        u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
        u64 ns = 0;
        ktime_t expire;
        struct kvm_vcpu *vcpu = apic->vcpu;
        unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
        unsigned long flags;
        ktime_t now;

        if (unlikely(!tscdeadline || !this_tsc_khz))
                return;

        local_irq_save(flags);

        now = ktime_get();
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        if (likely(tscdeadline > guest_tsc)) {
                ns = (tscdeadline - guest_tsc) * 1000000ULL;
                do_div(ns, this_tsc_khz);
                expire = ktime_add_ns(now, ns);
                expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
                hrtimer_start(&apic->lapic_timer.timer,
                              expire, HRTIMER_MODE_ABS_PINNED);
        } else
                apic_timer_expired(apic);

        local_irq_restore(flags);
}
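
/*
 * Illustrative note, not from the original source: the conversion above is
 * ns = (tscdeadline - guest_tsc) * 1000000 / this_tsc_khz.  Worked example
 * with assumed numbers: a vCPU with virtual_tsc_khz == 2000000 (2 GHz)
 * that is 3000000 TSC cycles short of the deadline sleeps for
 * 3000000 * 1000000 / 2000000 = 1500000 ns = 1.5 ms (minus
 * lapic_timer_advance_ns) before the hrtimer fires.
 */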

static void start_sw_period(struct kvm_lapic *apic)
{
        if (!apic->lapic_timer.period)
                return;

        if (apic_lvtt_oneshot(apic) &&
            ktime_after(ktime_get(),
                        apic->lapic_timer.target_expiration)) {
                apic_timer_expired(apic);
                return;
        }

        hrtimer_start(&apic->lapic_timer.timer,
                apic->lapic_timer.target_expiration,
                HRTIMER_MODE_ABS_PINNED);
}

static bool set_target_expiration(struct kvm_lapic *apic)
{
        ktime_t now;
        u64 tscl = rdtsc();

        now = ktime_get();
        apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
                * APIC_BUS_CYCLE_NS * apic->divide_count;

        if (!apic->lapic_timer.period)
                return false;

        /*
         * Do not allow the guest to program periodic timers with small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (apic_lvtt_period(apic)) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (apic->lapic_timer.period < min_period) {
                        pr_info_ratelimited(
                            "kvm: vcpu %i: requested %lld ns "
                            "lapic timer period limited to %lld ns\n",
                            apic->vcpu->vcpu_id,
                            apic->lapic_timer.period, min_period);
                        apic->lapic_timer.period = min_period;
                }
        }

        apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                   PRIx64 ", "
                   "timer initial count 0x%x, period %lldns, "
                   "expire @ 0x%016" PRIx64 ".\n", __func__,
                   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
                   kvm_lapic_get_reg(apic, APIC_TMICT),
                   apic->lapic_timer.period,
                   ktime_to_ns(ktime_add_ns(now,
                                apic->lapic_timer.period)));

        apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
                nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
        apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);

        return true;
}
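
/*
 * Illustrative note, not from the original source: the programmed period is
 * TMICT * APIC_BUS_CYCLE_NS * divide_count nanoseconds.  Under the assumed
 * values APIC_BUS_CYCLE_NS == 1 and divide_count == 128, TMICT = 781250
 * yields period = 781250 * 1 * 128 = 100000000 ns = 100 ms, so
 * target_expiration lands 100 ms past "now" (subject to the min_period
 * clamp in periodic mode).
 */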

static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
        apic->lapic_timer.tscdeadline +=
                nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
        apic->lapic_timer.target_expiration =
                ktime_add_ns(apic->lapic_timer.target_expiration,
                                apic->lapic_timer.period);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
        if (!lapic_in_kernel(vcpu))
                return false;

        return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

static void cancel_hv_timer(struct kvm_lapic *apic)
{
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
}

static bool start_hv_timer(struct kvm_lapic *apic)
{
        u64 tscdeadline = apic->lapic_timer.tscdeadline;

        if ((atomic_read(&apic->lapic_timer.pending) &&
                !apic_lvtt_period(apic)) ||
                kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
                if (apic->lapic_timer.hv_timer_in_use)
                        cancel_hv_timer(apic);
        } else {
                apic->lapic_timer.hv_timer_in_use = true;
                hrtimer_cancel(&apic->lapic_timer.timer);

                /* In case the sw timer triggered in the window */
                if (atomic_read(&apic->lapic_timer.pending) &&
                        !apic_lvtt_period(apic))
                        cancel_hv_timer(apic);
        }
        trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
                        apic->lapic_timer.hv_timer_in_use);
        return apic->lapic_timer.hv_timer_in_use;
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        WARN_ON(!apic->lapic_timer.hv_timer_in_use);
        WARN_ON(swait_active(&vcpu->wq));
        cancel_hv_timer(apic);
        apic_timer_expired(apic);

        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
                if (!start_hv_timer(apic))
                        start_sw_period(apic);
        }
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        WARN_ON(apic->lapic_timer.hv_timer_in_use);

        start_hv_timer(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        /* Possibly the TSC deadline timer is not enabled yet */
        if (!apic->lapic_timer.hv_timer_in_use)
                return;

        cancel_hv_timer(apic);

        if (atomic_read(&apic->lapic_timer.pending))
                return;

        if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
                start_sw_period(apic);
        else if (apic_lvtt_tscdeadline(apic))
                start_sw_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);

static void start_apic_timer(struct kvm_lapic *apic)
{
        atomic_set(&apic->lapic_timer.pending, 0);

        if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
                if (set_target_expiration(apic) &&
                        !(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
                        start_sw_period(apic);
        } else if (apic_lvtt_tscdeadline(apic)) {
                if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
                        start_sw_tscdeadline(apic);
        }
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
        bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

        if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
                apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
                if (lvt0_in_nmi_mode) {
                        apic_debug("Receive NMI setting on APIC_LVT0 "
                                   "for cpu %d\n", apic->vcpu->vcpu_id);
                        atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
                } else
                        atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
        }
}

int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
        int ret = 0;

        trace_kvm_apic_write(reg, val);

        switch (reg) {
        case APIC_ID:		/* Local APIC ID */
                if (!apic_x2apic_mode(apic))
                        kvm_apic_set_xapic_id(apic, val >> 24);
                else
                        ret = 1;
                break;

        case APIC_TASKPRI:
                report_tpr_access(apic, true);
                apic_set_tpr(apic, val & 0xff);
                break;

        case APIC_EOI:
                apic_set_eoi(apic);
                break;

        case APIC_LDR:
                if (!apic_x2apic_mode(apic))
                        kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
                else
                        ret = 1;
                break;

        case APIC_DFR:
                if (!apic_x2apic_mode(apic)) {
                        kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
                        recalculate_apic_map(apic->vcpu->kvm);
                } else
                        ret = 1;
                break;

        case APIC_SPIV: {
                u32 mask = 0x3ff;
                if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
                        mask |= APIC_SPIV_DIRECTED_EOI;
                apic_set_spiv(apic, val & mask);
                if (!(val & APIC_SPIV_APIC_ENABLED)) {
                        int i;
                        u32 lvt_val;

                        for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
                                lvt_val = kvm_lapic_get_reg(apic,
                                                       APIC_LVTT + 0x10 * i);
                                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
                                             lvt_val | APIC_LVT_MASKED);
                        }
                        apic_update_lvtt(apic);
                        atomic_set(&apic->lapic_timer.pending, 0);

                }
                break;
        }
        case APIC_ICR:
                /* No delay here, so we always clear the pending bit */
                kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
                apic_send_ipi(apic);
                break;

        case APIC_ICR2:
                if (!apic_x2apic_mode(apic))
                        val &= 0xff000000;
                kvm_lapic_set_reg(apic, APIC_ICR2, val);
                break;

        case APIC_LVT0:
                apic_manage_nmi_watchdog(apic, val);
                /* fall through */
        case APIC_LVTTHMR:
        case APIC_LVTPC:
        case APIC_LVT1:
        case APIC_LVTERR:
                /* TODO: Check vector */
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;

                val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
                kvm_lapic_set_reg(apic, reg, val);

                break;

        case APIC_LVTT:
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;
                val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
                kvm_lapic_set_reg(apic, APIC_LVTT, val);
                apic_update_lvtt(apic);
                break;

        case APIC_TMICT:
                if (apic_lvtt_tscdeadline(apic))
                        break;

                hrtimer_cancel(&apic->lapic_timer.timer);
                kvm_lapic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                break;

        case APIC_TDCR:
                if (val & 4)
                        apic_debug("KVM_WRITE:TDCR %x\n", val);
                kvm_lapic_set_reg(apic, APIC_TDCR, val);
                update_divide_count(apic);
                break;

        case APIC_ESR:
                if (apic_x2apic_mode(apic) && val != 0) {
                        apic_debug("KVM_WRITE:ESR not zero %x\n", val);
                        ret = 1;
                }
                break;

        case APIC_SELF_IPI:
                if (apic_x2apic_mode(apic)) {
                        kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
                } else
                        ret = 1;
                break;
        default:
                ret = 1;
                break;
        }
        if (ret)
                apic_debug("Local APIC Write to read-only register %x\n", reg);
        return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);

static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        unsigned int offset = address - apic->base_address;
        u32 val;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        /*
         * APIC registers must be aligned on a 128-bit boundary.
         * 32/64/128-bit registers must be accessed through 32-bit loads
         * and stores.  See SDM section 8.4.1.
         */
        if (len != 4 || (offset & 0xf)) {
                /* Don't shout loud, $infamous_os would cause only noise. */
                apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
                return 0;
        }

        val = *(u32*)data;

        /* too common printing */
        if (offset != APIC_EOI)
                apic_debug("%s: offset 0x%x with length 0x%x, and value is "
                           "0x%x\n", __func__, offset, len, val);

        kvm_lapic_reg_write(apic, offset & 0xff0, val);

        return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
        kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
        u32 val = 0;

        /* hw has done the conditional check and inst decode */
        offset &= 0xff0;

        kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

        /* TODO: optimize to just emulate side effect w/o one more write */
        kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!vcpu->arch.apic)
                return;

        hrtimer_cancel(&apic->lapic_timer.timer);

        if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
                static_key_slow_dec_deferred(&apic_hw_disabled);

        if (!apic->sw_enabled)
                static_key_slow_dec_deferred(&apic_sw_disabled);

        if (apic->regs)
                free_page((unsigned long)apic->regs);

        kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu))
                return 0;

        return apic->lapic_timer.tscdeadline;
}

u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu) ||
                !apic_lvtt_tscdeadline(apic))
                return 0;

        return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
                        apic_lvtt_period(apic))
                return;

        hrtimer_cancel(&apic->lapic_timer.timer);
        apic->lapic_timer.tscdeadline = data;
        start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
                     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
        u64 tpr;

        tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

        return (tpr & 0xf0) >> 4;
}
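
/*
 * Illustrative note, not from the original source: CR8 mirrors the upper
 * nibble of the 8-bit TPR.  E.g. a guest TPR of 0x3f reads back as
 * CR8 = (0x3f & 0xf0) >> 4 = 3, and writing CR8 = 3 stores 3 << 4 = 0x30
 * into APIC_TASKPRI (together with the bits preserved above).
 */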

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
        u64 old_value = vcpu->arch.apic_base;
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!apic)
                value |= MSR_IA32_APICBASE_BSP;

        vcpu->arch.apic_base = value;

        if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
                kvm_update_cpuid(vcpu);

        if (!apic)
                return;

        /* update jump label if enable bit changes */
        if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
                if (value & MSR_IA32_APICBASE_ENABLE) {
                        kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
                        static_key_slow_dec_deferred(&apic_hw_disabled);
                } else {
                        static_key_slow_inc(&apic_hw_disabled.key);
                        recalculate_apic_map(vcpu->kvm);
                }
        }

        if ((old_value ^ value) & X2APIC_ENABLE) {
                if (value & X2APIC_ENABLE) {
                        kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
                        kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
                } else
                        kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
        }

        apic->base_address = apic->vcpu->arch.apic_base &
                             MSR_IA32_APICBASE_BASE;

        if ((value & MSR_IA32_APICBASE_ENABLE) &&
             apic->base_address != APIC_DEFAULT_PHYS_BASE)
                pr_warn_once("APIC base relocation is unsupported by KVM");

        /* with FSB delivery interrupt, we can restart APIC functionality */
        apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
                   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
        struct kvm_lapic *apic;
        int i;

        apic_debug("%s\n", __func__);

        ASSERT(vcpu);
        apic = vcpu->arch.apic;
        ASSERT(apic != NULL);

        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);

        if (!init_event) {
                kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
                                         MSR_IA32_APICBASE_ENABLE);
                kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
        }
        kvm_apic_set_version(apic->vcpu);

        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
        if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                kvm_lapic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

        kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
        apic_set_spiv(apic, 0xff);
        kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
        if (!apic_x2apic_mode(apic))
                kvm_apic_set_ldr(apic, 0);
        kvm_lapic_set_reg(apic, APIC_ESR, 0);
        kvm_lapic_set_reg(apic, APIC_ICR, 0);
        kvm_lapic_set_reg(apic, APIC_ICR2, 0);
        kvm_lapic_set_reg(apic, APIC_TDCR, 0);
        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
        for (i = 0; i < 8; i++) {
                kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
                kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
                kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
        apic->irr_pending = vcpu->arch.apicv_active;
        apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
        apic->highest_isr_cache = -1;
        update_divide_count(apic);
        atomic_set(&apic->lapic_timer.pending, 0);
        if (kvm_vcpu_is_bsp(vcpu))
                kvm_lapic_set_base(vcpu,
                                vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
        vcpu->arch.pv_eoi.msr_val = 0;
        apic_update_ppr(apic);

        vcpu->arch.apic_arb_prio = 0;
        vcpu->arch.apic_attention = 0;

        apic_debug("%s: vcpu=%p, id=%d, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
                   vcpu, kvm_apic_id(apic),
                   vcpu->arch.apic_base, apic->base_address);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
        return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
                return atomic_read(&apic->lapic_timer.pending);

        return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
        u32 reg = kvm_lapic_get_reg(apic, lvt_type);
        int vector, mode, trig_mode;

        if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
                vector = reg & APIC_VECTOR_MASK;
                mode = reg & APIC_MODE_MASK;
                trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
                return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
                                        NULL);
        }
        return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (apic)
                kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
        .read     = apic_mmio_read,
        .write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
        struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
        struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

        apic_timer_expired(apic);

        if (lapic_is_periodic(apic)) {
                advance_periodic_target_expiration(apic);
                hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
                return HRTIMER_RESTART;
        } else
                return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic;

        ASSERT(vcpu != NULL);
        apic_debug("apic_init %d\n", vcpu->vcpu_id);

        apic = kzalloc(sizeof(*apic), GFP_KERNEL);
        if (!apic)
                goto nomem;

        vcpu->arch.apic = apic;

        apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
        if (!apic->regs) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
        apic->vcpu = vcpu;

        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED);
        apic->lapic_timer.timer.function = apic_timer_fn;

        /*
         * APIC is created enabled. This will prevent kvm_lapic_set_base from
         * thinking that APIC state has changed.
         */
        vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
        static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
        kvm_lapic_reset(vcpu, false);
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

        return 0;
nomem_free_apic:
        kfree(apic);
nomem:
        return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        int highest_irr;

        if (!apic_enabled(apic))
                return -1;

        apic_update_ppr(apic);
        highest_irr = apic_find_highest_irr(apic);
        if ((highest_irr == -1) ||
            ((highest_irr & 0xF0) <= kvm_lapic_get_reg(apic, APIC_PROCPRI)))
                return -1;
        return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
        u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
        int r = 0;

        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                r = 1;
        if ((lvt0 & APIC_LVT_MASKED) == 0 &&
            GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
                r = 1;
        return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (atomic_read(&apic->lapic_timer.pending) > 0) {
                kvm_apic_local_deliver(apic, APIC_LVTT);
                if (apic_lvtt_tscdeadline(apic))
                        apic->lapic_timer.tscdeadline = 0;
                if (apic_lvtt_oneshot(apic)) {
                        apic->lapic_timer.tscdeadline = 0;
                        apic->lapic_timer.target_expiration = 0;
                }
                atomic_set(&apic->lapic_timer.pending, 0);
        }
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
        int vector = kvm_apic_has_interrupt(vcpu);
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (vector == -1)
                return -1;

        /*
         * We get here even with APIC virtualization enabled, if doing
         * nested virtualization and L1 runs with the "acknowledge interrupt
         * on exit" mode. Then we cannot inject the interrupt via RVI,
         * because the process would deliver it through the IDT.
         */

        apic_set_isr(vector, apic);
        apic_update_ppr(apic);
        apic_clear_irr(vector, apic);

        if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
                apic_clear_isr(vector, apic);
                apic_update_ppr(apic);
        }

        return vector;
}

static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
        if (apic_x2apic_mode(vcpu->arch.apic)) {
                u32 *id = (u32 *)(s->regs + APIC_ID);

                if (vcpu->kvm->arch.x2apic_format) {
                        if (*id != vcpu->vcpu_id)
                                return -EINVAL;
                } else {
                        if (set)
                                *id >>= 24;
                        else
                                *id <<= 24;
                }
        }

        return 0;
}
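
/*
 * Illustrative note, not from the original source: in the legacy (non
 * x2apic_format) path above, the xAPIC ID sits in bits 31:24 of APIC_ID
 * while the x2APIC view uses the raw id.  E.g. vcpu_id = 5 is reported to
 * userspace as 5 << 24 = 0x05000000 when state is read out (set == false)
 * and shifted back down, 0x05000000 >> 24 = 5, when state is loaded
 * (set == true).
 */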

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
        memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
        return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        int r;

        kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
        /* set SPIV separately to get count of SW disabled APICs right */
        apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

        r = kvm_apic_state_fixup(vcpu, s, true);
        if (r)
                return r;
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

        recalculate_apic_map(vcpu->kvm);
        kvm_apic_set_version(vcpu);

        apic_update_ppr(apic);
        hrtimer_cancel(&apic->lapic_timer.timer);
        apic_update_lvtt(apic);
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
        apic->isr_count = vcpu->arch.apicv_active ?
                                1 : count_vectors(apic->regs + APIC_ISR);
        apic->highest_isr_cache = -1;
        if (vcpu->arch.apicv_active) {
                if (kvm_x86_ops->apicv_post_state_restore)
                        kvm_x86_ops->apicv_post_state_restore(vcpu);
                kvm_x86_ops->hwapic_irr_update(vcpu,
                                apic_find_highest_irr(apic));
                kvm_x86_ops->hwapic_isr_update(vcpu,
                                apic_find_highest_isr(apic));
        }
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        if (ioapic_in_kernel(vcpu->kvm))
                kvm_rtc_eoi_tracking_restore_one(vcpu);

        vcpu->arch.apic_arb_prio = 0;

        return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
        struct hrtimer *timer;

        if (!lapic_in_kernel(vcpu))
                return;

        timer = &vcpu->arch.apic->lapic_timer.timer;
        if (hrtimer_cancel(timer))
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
        bool pending;
        int vector;
        /*
         * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
         * and KVM_PV_EOI_ENABLED in guest memory as follows:
         *
         * KVM_APIC_PV_EOI_PENDING is unset:
         *	-> host disabled PV EOI.
         * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
         *	-> host enabled PV EOI, guest did not execute EOI yet.
         * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
         *	-> host enabled PV EOI, guest executed EOI.
         */
        BUG_ON(!pv_eoi_enabled(vcpu));
        pending = pv_eoi_get_pending(vcpu);
        /*
         * Clear pending bit in any case: it will be set again on vmentry.
         * While this might not be ideal from performance point of view,
         * this makes sure pv eoi is only enabled when we know it's safe.
         */
        pv_eoi_clr_pending(vcpu);
        if (pending)
                return;
        vector = apic_set_eoi(apic);
        trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
        u32 data;

        if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
                apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

        if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;

        if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
                                  sizeof(u32)))
                return;

        apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
        if (!pv_eoi_enabled(vcpu) ||
            /* IRR set or many bits in ISR: could be nested. */
            apic->irr_pending ||
            /* Cache not set: could be safe but we don't bother. */
            apic->highest_isr_cache == -1 ||
            /* Need EOI to update ioapic. */
            kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
                /*
                 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
                 * so we need not do anything here.
                 */
                return;
        }

        pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
        u32 data, tpr;
        int max_irr, max_isr;
        struct kvm_lapic *apic = vcpu->arch.apic;

        apic_sync_pv_eoi_to_guest(vcpu, apic);

        if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;

        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
        max_irr = apic_find_highest_irr(apic);
        if (max_irr < 0)
                max_irr = 0;
        max_isr = apic_find_highest_isr(apic);
        if (max_isr < 0)
                max_isr = 0;
        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

        kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
                                sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
        if (vapic_addr) {
                if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                        &vcpu->arch.apic->vapic_cache,
                                        vapic_addr, sizeof(u32)))
                        return -EINVAL;
                __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
        } else {
                __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
        }

        vcpu->arch.apic->vapic_addr = vapic_addr;
        return 0;
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = (msr - APIC_BASE_MSR) << 4;

        if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
                return 1;

        if (reg == APIC_ICR2)
                return 1;

        /* if this is ICR write vector before command */
        if (reg == APIC_ICR)
                kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
        return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

        if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
                return 1;

        if (reg == APIC_DFR || reg == APIC_ICR2) {
                apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
                           reg);
                return 1;
        }

        if (kvm_lapic_reg_read(apic, reg, 4, &low))
                return 1;
        if (reg == APIC_ICR)
                kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

        *data = (((u64)high) << 32) | low;

        return 0;
}
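
/*
 * Illustrative note, not from the original source: x2APIC MSRs map onto the
 * MMIO register layout as reg = (msr - APIC_BASE_MSR) << 4.  E.g. the
 * x2APIC ICR MSR 0x830 yields reg = (0x830 - 0x800) << 4 = 0x300 = APIC_ICR,
 * which is why a 64-bit ICR access is split into ICR2 (high 32 bits) and
 * ICR (low 32 bits) above.
 */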

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu))
                return 1;

        /* if this is ICR write vector before command */
        if (reg == APIC_ICR)
                kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
        return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 low, high = 0;

        if (!lapic_in_kernel(vcpu))
                return 1;

        if (kvm_lapic_reg_read(apic, reg, 4, &low))
                return 1;
        if (reg == APIC_ICR)
                kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

        *data = (((u64)high) << 32) | low;

        return 0;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
        u64 addr = data & ~KVM_MSR_ENABLED;

        if (!IS_ALIGNED(addr, 4))
                return 1;

        vcpu->arch.pv_eoi.msr_val = data;
        if (!pv_eoi_enabled(vcpu))
                return 0;
        return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
                                         addr, sizeof(u8));
}
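
/*
 * Illustrative note, not from the original source: the PV EOI MSR value
 * packs the enable flag into bit 0 (KVM_MSR_ENABLED) and the guest address
 * of the one-byte EOI flag into the remaining bits.  E.g. an assumed write
 * of data = 0x12345001 enables PV EOI at addr = data & ~KVM_MSR_ENABLED =
 * 0x12345000, which passes the 4-byte alignment check above.
 */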

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u8 sipi_vector;
        unsigned long pe;

        if (!lapic_in_kernel(vcpu) || !apic->pending_events)
                return;

        /*
         * INITs are latched while in SMM.  Because an SMM CPU cannot
         * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
         * and delay processing of INIT until the next RSM.
         */
        if (is_smm(vcpu)) {
                WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
                if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
                        clear_bit(KVM_APIC_SIPI, &apic->pending_events);
                return;
        }

        pe = xchg(&apic->pending_events, 0);
        if (test_bit(KVM_APIC_INIT, &pe)) {
                kvm_lapic_reset(vcpu, true);
                kvm_vcpu_reset(vcpu, true);
                if (kvm_vcpu_is_bsp(apic->vcpu))
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                else
                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
        }
        if (test_bit(KVM_APIC_SIPI, &pe) &&
            vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
                /* evaluate pending_events before reading the vector */
                smp_rmb();
                sipi_vector = apic->sipi_vector;
                apic_debug("vcpu %d received sipi with vector # %x\n",
                           vcpu->vcpu_id, sipi_vector);
                kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        }
}

void kvm_lapic_init(void)
{
        /* do not patch jump label more than once per second */
        jump_label_rate_limit(&apic_hw_disabled, HZ);
        jump_label_rate_limit(&apic_sw_disabled, HZ);
}

void kvm_lapic_exit(void)
{
        static_key_deferred_flush(&apic_hw_disabled);
        static_key_deferred_flush(&apic_sw_disabled);
}