/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

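/*
 * A plain 64-bit '%' is not usable on 32-bit x86 (the compiler would
 * emit a libgcc helper call the kernel does not provide), so open-code
 * the modulo with div64_u64() there.
 */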
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION            (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH       (1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK         0xc0000
#define APIC_DEST_NOSHORT       0x0
#define APIC_DEST_MASK          0x800
#define MAX_APIC_VECTOR         256
#define APIC_VECTORS_PER_REG    32

#define APIC_BROADCAST          0xFF
#define X2APIC_BROADCAST        0xFFFFFFFFul

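/*
 * IRR/ISR/TMR are each stored as eight 32-bit registers spaced 0x10
 * apart; VEC_POS() selects the bit within a register and REG_POS()
 * the register offset holding a given vector (see lapic.h).
 */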
static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
                apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
        clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK        \
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK       \
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
{
        return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
}

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

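/*
 * Translate a logical destination into a pointer to a 16-entry cluster
 * array plus a 16-bit mask of targeted entries.  In x2APIC mode the
 * cluster is carved directly out of phys_map (16 IDs per cluster); in
 * xAPIC mode the precomputed flat/cluster maps are used.  Returns false
 * when the map cannot answer the query and the caller must take the
 * slow path.
 */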
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
        switch (map->mode) {
        case KVM_APIC_MODE_X2APIC: {
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
        }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        default:
                /* Not optimized. */
                return false;
        }
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

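/*
 * Rebuild the optimized physical/logical destination map.  Runs under
 * apic_map_lock; the new map is published with rcu_assign_pointer() and
 * the old one freed after a grace period, so readers only ever need
 * rcu_read_lock().
 */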
static void recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */

        mutex_lock(&kvm->arch.apic_map_lock);

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                       sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);

        if (!new)
                goto out;

        new->max_apic_id = max_id;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_lapic *apic = vcpu->arch.apic;
                struct kvm_lapic **cluster;
                u16 mask;
                u32 ldr;
                u8 xapic_id;
                u32 x2apic_id;

                if (!kvm_apic_present(vcpu))
                        continue;

                xapic_id = kvm_xapic_id(apic);
                x2apic_id = kvm_x2apic_id(apic);

                /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
                if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
                                x2apic_id <= new->max_apic_id)
                        new->phys_map[x2apic_id] = apic;
                /*
                 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
                 * prevent them from masking VCPUs with APIC ID <= 0xff.
                 */
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;

                ldr = kvm_lapic_get_reg(apic, APIC_LDR);

                if (apic_x2apic_mode(apic)) {
                        new->mode |= KVM_APIC_MODE_X2APIC;
                } else if (ldr) {
                        ldr = GET_APIC_LOGICAL_ID(ldr);
                        if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                                new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
                        else
                                new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                }

                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                        continue;

                if (mask)
                        cluster[ffs(mask) - 1] = apic;
        }
out:
        old = rcu_dereference_protected(kvm->arch.apic_map,
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled) {
                        static_key_slow_dec_deferred(&apic_sw_disabled);
                        recalculate_apic_map(apic->vcpu->kvm);
                } else
                        static_key_slow_inc(&apic_sw_disabled.key);
        }
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        recalculate_apic_map(apic->vcpu->kvm);
}

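/*
 * In x2APIC mode the LDR is read-only and derived from the APIC ID:
 * bits 31:16 hold the cluster (id >> 4) and bits 15:0 a one-hot bit
 * for the position within the 16-CPU cluster (1 << (id & 0xf)).
 */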
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
        return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *feat;
        u32 v = APIC_VERSION;

        if (!lapic_in_kernel(vcpu))
                return;

        /*
         * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
         * implementation), which doesn't have an EOI register; some buggy
         * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
         * in the LAPIC without checking the IOAPIC version first, and
         * level-triggered interrupts then never get EOIed in the IOAPIC.
         */
        feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
        if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
        LVT_MASK,       /* part LVTT mask, timer mode mask added at runtime */
        LVT_MASK | APIC_MODE_MASK,      /* LVTTHMR */
        LVT_MASK | APIC_MODE_MASK,      /* LVTPC */
        LINT_MASK, LINT_MASK,   /* LVT0-1 */
        LVT_MASK                /* LVTERR */
};

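/*
 * Scan the 256-bit IRR/ISR bitmap from the top, one 32-bit register at
 * a time; returns the highest vector that is set, or -1 if none is.
 */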
static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}

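/*
 * Transfer pending vectors from the posted-interrupt request bitmap
 * into the vIRR: each non-zero PIR word is atomically snapped to zero
 * with xchg() and OR-ed into the corresponding IRR word.  *max_irr is
 * set to the highest vector now pending; the return value says whether
 * that vector came from the PIR, i.e. whether RVI must be refreshed.
 */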
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
                irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
                        prev_irr_val = irr_val;
                        irr_val |= xchg(&pir[i], 0);
                        *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                        if (prev_irr_val != irr_val) {
                                max_updated_irr =
                                        __fls(irr_val ^ prev_irr_val) + vec;
                        }
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint.  It will always be true
         * with virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        vcpu = apic->vcpu;

        if (unlikely(vcpu->arch.apicv_active)) {
                /* need to update RVI */
                apic_clear_vector(vec, apic->regs + APIC_IRR);
                kvm_x86_ops->hwapic_irr_update(vcpu,
                                apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                apic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
        if (unlikely(vcpu->arch.apicv_active))
                kvm_x86_ops->hwapic_isr_update(vcpu, vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * An ISR (in-service register) bit is set when an interrupt
                 * is injected, and the highest pending vector is always the
                 * one injected.  Thus the most recently set bit matches the
                 * highest bit in the ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment.  In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(vcpu->arch.apicv_active))
                kvm_x86_ops->hwapic_isr_update(vcpu,
                                apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and
         * the value returned may be stale, but kvm_vcpu_kick() in
         * __apic_accept_irq() will cause an immediate vmexit and the
         * value will be recalculated on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                                 irq->level, irq->trig_mode, dest_map);
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0)
                apic_debug("Can't read EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
        return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
                apic_debug("Can't set EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
                apic_debug("Can't clear EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (apic->vcpu->arch.apicv_active)
                highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}

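/*
 * Recompute the processor priority per the SDM: PPR is the TPR if the
 * TPR's priority class (bits 7:4) is at least that of the highest
 * in-service vector, otherwise it is that vector's priority class with
 * the sub-class cleared.  Returns true if PPR was lowered, in which
 * case a previously blocked interrupt may now be deliverable.
 */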
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                   apic, ppr, isr, isrv);

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                        X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        if (apic_x2apic_mode(apic))
                return mda == kvm_x2apic_id(apic);

        /*
         * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as
         * if it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode
         * and this allows unique addressing of VCPUs with APIC ID over 0xff.
         * The 0xff check is needed because the xAPIC ID is writeable.
         */
        if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
                return true;

        return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                apic_debug("Bad DFR vcpu %d: %08x\n",
                           apic->vcpu->vcpu_id,
                           kvm_lapic_get_reg(apic, APIC_DFR));
                return false;
        }
}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to
 *    LAPICs in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                        struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                         int short_hand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        apic_debug("target %p, source %p, dest 0x%x, "
                   "dest_mode 0x%x, short_hand 0x%x\n",
                   target, source, dest, dest_mode, short_hand);

        ASSERT(target);
        switch (short_hand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                apic_debug("kvm: apic: Bad dest shorthand value %x\n",
                           short_hand);
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

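/*
 * Vector hashing: deterministically pick the (vector % dest_vcpus)-th
 * set bit in *bitmap, so that a given vector always lands on the same
 * destination out of the lowest-priority candidates.
 */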
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                        const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                printk(KERN_INFO
                       "Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                     map->mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}

/*
 * Return true if the interrupt can be handled by using *bitmap as an index
 * mask for valid destinations in the *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would
 * be zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        *dst = &map->phys_map[irq->dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                           (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                       (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                             bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret)
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        if (*r < 0)
                                *r = 0;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }

        rcu_read_unlock();
        return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *      1. For lowest-priority interrupts, store all the possible
 *         destination vCPUs in an array.
 *      2. Use "guest vector % max number of destination vCPUs" to find
 *         the right destination vCPU in the array for the lowest-priority
 *         interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                                  struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
            hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                /* fall through */
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
                        else
                                apic_clear_vector(vector, apic->regs + APIC_TMR);
                }

                if (vcpu->arch.apicv_active)
                        kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
                else {
                        kvm_lapic_set_irr(vector, apic);

                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                result = 1;
                kvm_make_request(KVM_REQ_SMI, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        /*
                         * Make sure pending_events is visible before sending
                         * the request.
                         */
                        smp_wmb();
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                } else {
                        apic_debug("Ignoring de-assert INIT to vcpu %d\n",
                                   vcpu->vcpu_id);
                }
                break;

        case APIC_DM_STARTUP:
                apic_debug("SIPI to vcpu %d vector 0x%02x\n",
                           vcpu->vcpu_id, vector);
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled.  Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* Eoi the ioapic only if the ioapic doesn't own the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every EOI write has a corresponding ISR bit set; one example
         * is when the kernel checks the timer in setup_IO_APIC().
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

static void apic_send_ipi(struct kvm_lapic *apic)
{
        u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
        u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
        struct kvm_lapic_irq irq;

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        apic_debug("icr_high 0x%x, icr_low 0x%x, "
                   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
                   "msi_redir_hint 0x%x\n",
                   icr_high, icr_low, irq.shorthand, irq.dest_id,
                   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
                   irq.vector, irq.msi_redir_hint);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

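/*
 * The current-count register is not kept up to date in apic->regs;
 * instead it is computed on demand from the time remaining until
 * target_expiration:  TMCCT = remaining_ns / (bus cycle * divider).
 */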
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;
        u32 tmcct;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
            apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        tmcct = div64_u64(ns,
                          (APIC_BUS_CYCLE_NS * apic->divide_count));

        return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                apic_debug("Access APIC ARBPRI register which is for P6\n");
                break;

        case APIC_TMCCT: /* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                /* fall thru */
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
                       void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;
        /* this bitmask has a bit cleared for each reserved register */
        static const u64 rmask = 0x43ff01ffffffe70cULL;

        if ((alignment + len) > 4) {
                apic_debug("KVM_APIC_READ: alignment error %x %d\n",
                           offset, len);
                return 1;
        }

        if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
                apic_debug("KVM_APIC_READ: read reserved register %x\n",
                           offset);
                return 1;
        }

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return kvm_apic_hw_enabled(apic) &&
            addr >= apic->base_address &&
            addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                          gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}

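/*
 * TDCR encodes the divider in bits 0, 1 and 3.  Folding bit 3 down
 * ((tmp1 & 0x8) >> 1) and adding 1 yields log2 of the divisor, with
 * the divide-by-1 encoding (0b1011) wrapping around through the & 0x7.
 */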
static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->divide_count = 0x1 << (tmp2 & 0x7);

        apic_debug("timer divide count is 0x%x\n",
                   apic->divide_count);
}

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
        /*
         * Do not allow the guest to program periodic timers with a small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (apic->lapic_timer.period < min_period) {
                        pr_info_ratelimited(
                            "kvm: vcpu %i: requested %lld ns "
                            "lapic timer period limited to %lld ns\n",
                            apic->vcpu->vcpu_id,
                            apic->lapic_timer.period, min_period);
                        apic->lapic_timer.period = min_period;
                }
        }
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
                        apic->lapic_timer.timer_mode_mask;

        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
                }
                apic->lapic_timer.timer_mode = timer_mode;
                limit_periodic_timer_frequency(apic);
        }
}

static void apic_timer_expired(struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct swait_queue_head *q = &vcpu->wq;
        struct kvm_timer *ktimer = &apic->lapic_timer;

        if (atomic_read(&apic->lapic_timer.pending))
                return;

        atomic_inc(&apic->lapic_timer.pending);
        kvm_set_pending_timer(vcpu);

        /*
         * For x86, the atomic_inc() is serialized, thus
         * using swait_active() is safe.
         */
        if (swait_active(q))
                swake_up(q);

        if (apic_lvtt_tscdeadline(apic))
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

        if (kvm_apic_hw_enabled(apic)) {
                int vec = reg & APIC_VECTOR_MASK;
                void *bitmap = apic->regs + APIC_ISR;

                if (vcpu->arch.apicv_active)
                        bitmap = apic->regs + APIC_IRR;

                if (apic_test_vector(vec, bitmap))
                        return true;
        }
        return false;
}

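/*
 * The software timer is armed lapic_timer_advance_ns early (see
 * start_sw_tscdeadline()); if the timer interrupt was already injected,
 * busy-wait here until the guest TSC actually reaches the programmed
 * deadline so the guest never observes an early expiry.
 */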
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 guest_tsc, tsc_deadline;

        if (!lapic_in_kernel(vcpu))
                return;

        if (apic->lapic_timer.expired_tscdeadline == 0)
                return;

        if (!lapic_timer_int_injected(vcpu))
                return;

        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
        if (guest_tsc < tsc_deadline)
                __delay(min(tsc_deadline - guest_tsc,
                        nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}

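/*
 * Convert the guest-TSC deadline to host time:
 * ns = (tscdeadline - guest_tsc) * 10^6 / virtual_tsc_khz,
 * then arm an hrtimer, shortened by lapic_timer_advance_ns.
 */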
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
        u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
        u64 ns = 0;
        ktime_t expire;
        struct kvm_vcpu *vcpu = apic->vcpu;
        unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
        unsigned long flags;
        ktime_t now;

        if (unlikely(!tscdeadline || !this_tsc_khz))
                return;

        local_irq_save(flags);

        now = ktime_get();
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        if (likely(tscdeadline > guest_tsc)) {
                ns = (tscdeadline - guest_tsc) * 1000000ULL;
                do_div(ns, this_tsc_khz);
                expire = ktime_add_ns(now, ns);
                expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
                hrtimer_start(&apic->lapic_timer.timer,
                              expire, HRTIMER_MODE_ABS_PINNED);
        } else
                apic_timer_expired(apic);

        local_irq_restore(flags);
}

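/*
 * Rescale the time left on the timer when the guest changes TDCR
 * mid-countdown:  new_remaining = old_remaining * new_div / old_div,
 * applied to both the hrtimer expiration and the TSC deadline.
 */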
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
        ktime_t now, remaining;
        u64 ns_remaining_old, ns_remaining_new;

        apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
                * APIC_BUS_CYCLE_NS * apic->divide_count;
        limit_periodic_timer_frequency(apic);

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns_remaining_old = ktime_to_ns(remaining);
        ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
                                           apic->divide_count, old_divisor);

        apic->lapic_timer.tscdeadline +=
                nsec_to_cycles(apic->vcpu, ns_remaining_new) -
                nsec_to_cycles(apic->vcpu, ns_remaining_old);
        apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
}

static bool set_target_expiration(struct kvm_lapic *apic)
{
        ktime_t now;
        u64 tscl = rdtsc();

        now = ktime_get();
        apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
                * APIC_BUS_CYCLE_NS * apic->divide_count;

        if (!apic->lapic_timer.period) {
                apic->lapic_timer.tscdeadline = 0;
                return false;
        }

        limit_periodic_timer_frequency(apic);

        apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                   PRIx64 ", "
                   "timer initial count 0x%x, period %lldns, "
                   "expire @ 0x%016" PRIx64 ".\n", __func__,
                   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
                   kvm_lapic_get_reg(apic, APIC_TMICT),
                   apic->lapic_timer.period,
                   ktime_to_ns(ktime_add_ns(now,
                                apic->lapic_timer.period)));

        apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
                nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
        apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);

        return true;
}

static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
        ktime_t now = ktime_get();
        u64 tscl = rdtsc();
        ktime_t delta;

        /*
         * Synchronize both deadlines to the same time source or
         * differences in the periods (caused by differences in the
         * underlying clocks or numerical approximation errors) will
         * cause the two to drift apart over time as the errors
         * accumulate.
         */
        apic->lapic_timer.target_expiration =
                ktime_add_ns(apic->lapic_timer.target_expiration,
                                apic->lapic_timer.period);
        delta = ktime_sub(apic->lapic_timer.target_expiration, now);
        apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
                nsec_to_cycles(apic->vcpu, delta);
}

static void start_sw_period(struct kvm_lapic *apic)
{
        if (!apic->lapic_timer.period)
                return;

        if (ktime_after(ktime_get(),
                        apic->lapic_timer.target_expiration)) {
                apic_timer_expired(apic);

                if (apic_lvtt_oneshot(apic))
                        return;

                advance_periodic_target_expiration(apic);
        }

        hrtimer_start(&apic->lapic_timer.timer,
                apic->lapic_timer.target_expiration,
                HRTIMER_MODE_ABS_PINNED);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
        if (!lapic_in_kernel(vcpu))
                return false;

        return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

static void cancel_hv_timer(struct kvm_lapic *apic)
{
        WARN_ON(preemptible());
        WARN_ON(!apic->lapic_timer.hv_timer_in_use);
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
}

static bool start_hv_timer(struct kvm_lapic *apic)
{
        struct kvm_timer *ktimer = &apic->lapic_timer;
        int r;

        WARN_ON(preemptible());
        if (!kvm_x86_ops->set_hv_timer)
                return false;

        if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
                return false;

        if (!ktimer->tscdeadline)
                return false;

        r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
        if (r < 0)
                return false;

        ktimer->hv_timer_in_use = true;
        hrtimer_cancel(&ktimer->timer);

        /*
         * Also recheck ktimer->pending, in case the sw timer triggered in
         * the window.  For periodic timer, leave the hv timer running for
         * simplicity, and the deadline will be recomputed on the next vmexit.
         */
        if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
                if (r)
                        apic_timer_expired(apic);
                return false;
        }

        trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
        return true;
}

static void start_sw_timer(struct kvm_lapic *apic)
{
        struct kvm_timer *ktimer = &apic->lapic_timer;

        WARN_ON(preemptible());
        if (apic->lapic_timer.hv_timer_in_use)
                cancel_hv_timer(apic);
        if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
                return;

        if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
                start_sw_period(apic);
        else if (apic_lvtt_tscdeadline(apic))
                start_sw_tscdeadline(apic);
        trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}

static void restart_apic_timer(struct kvm_lapic *apic)
{
        preempt_disable();
        if (!start_hv_timer(apic))
                start_sw_timer(apic);
        preempt_enable();
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        preempt_disable();
        /* If the preempt notifier has already run, it also called apic_timer_expired */
        if (!apic->lapic_timer.hv_timer_in_use)
                goto out;
        WARN_ON(swait_active(&vcpu->wq));
        cancel_hv_timer(apic);
        apic_timer_expired(apic);

        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
                restart_apic_timer(apic);
        }
out:
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
        restart_apic_timer(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        preempt_disable();
        /* Possibly the TSC deadline timer is not enabled yet */
        if (apic->lapic_timer.hv_timer_in_use)
                start_sw_timer(apic);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        WARN_ON(!apic->lapic_timer.hv_timer_in_use);
        restart_apic_timer(apic);
}

static void start_apic_timer(struct kvm_lapic *apic)
{
        atomic_set(&apic->lapic_timer.pending, 0);

        if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
            && !set_target_expiration(apic))
                return;

        restart_apic_timer(apic);
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
        bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

        if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
                apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
                if (lvt0_in_nmi_mode) {
                        apic_debug("Receive NMI setting on APIC_LVT0 "
                                   "for cpu %d\n", apic->vcpu->vcpu_id);
                        atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
                } else
                        atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
        }
}

int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
        int ret = 0;

        trace_kvm_apic_write(reg, val);

        switch (reg) {
        case APIC_ID:           /* Local APIC ID */
                if (!apic_x2apic_mode(apic))
                        kvm_apic_set_xapic_id(apic, val >> 24);
                else
                        ret = 1;
                break;

        case APIC_TASKPRI:
                report_tpr_access(apic, true);
                apic_set_tpr(apic, val & 0xff);
                break;

        case APIC_EOI:
                apic_set_eoi(apic);
                break;

        case APIC_LDR:
                if (!apic_x2apic_mode(apic))
                        kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
                else
                        ret = 1;
                break;

        case APIC_DFR:
                if (!apic_x2apic_mode(apic)) {
                        kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
                        recalculate_apic_map(apic->vcpu->kvm);
                } else
                        ret = 1;
                break;

        case APIC_SPIV: {
                u32 mask = 0x3ff;

                if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
                        mask |= APIC_SPIV_DIRECTED_EOI;
                apic_set_spiv(apic, val & mask);
                if (!(val & APIC_SPIV_APIC_ENABLED)) {
                        int i;
                        u32 lvt_val;

                        for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
                                lvt_val = kvm_lapic_get_reg(apic,
                                                       APIC_LVTT + 0x10 * i);
                                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
                                             lvt_val | APIC_LVT_MASKED);
                        }
                        apic_update_lvtt(apic);
                        atomic_set(&apic->lapic_timer.pending, 0);

                }
                break;
        }
        case APIC_ICR:
                /* No delay here, so we always clear the pending bit */
                kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
                apic_send_ipi(apic);
                break;

        case APIC_ICR2:
                if (!apic_x2apic_mode(apic))
                        val &= 0xff000000;
                kvm_lapic_set_reg(apic, APIC_ICR2, val);
                break;

        case APIC_LVT0:
                apic_manage_nmi_watchdog(apic, val);
                /* fall through */
        case APIC_LVTTHMR:
        case APIC_LVTPC:
        case APIC_LVT1:
        case APIC_LVTERR:
                /* TODO: Check vector */
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;

                val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
                kvm_lapic_set_reg(apic, reg, val);

                break;

        case APIC_LVTT:
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;
                val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
                kvm_lapic_set_reg(apic, APIC_LVTT, val);
                apic_update_lvtt(apic);
                break;

        case APIC_TMICT:
                if (apic_lvtt_tscdeadline(apic))
                        break;

                hrtimer_cancel(&apic->lapic_timer.timer);
                kvm_lapic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                break;

        case APIC_TDCR: {
                uint32_t old_divisor = apic->divide_count;

                if (val & 4)
                        apic_debug("KVM_WRITE:TDCR %x\n", val);
                kvm_lapic_set_reg(apic, APIC_TDCR, val);
                update_divide_count(apic);
                if (apic->divide_count != old_divisor &&
                                apic->lapic_timer.period) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
                        update_target_expiration(apic, old_divisor);
                        restart_apic_timer(apic);
                }
                break;
        }
        case APIC_ESR:
                if (apic_x2apic_mode(apic) && val != 0) {
                        apic_debug("KVM_WRITE:ESR not zero %x\n", val);
                        ret = 1;
                }
                break;

        case APIC_SELF_IPI:
                if (apic_x2apic_mode(apic)) {
                        kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
                } else
                        ret = 1;
                break;
        default:
                ret = 1;
                break;
        }
        if (ret)
                apic_debug("Local APIC Write to read-only register %x\n", reg);
        return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);

static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                           gpa_t address, int len, const void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        unsigned int offset = address - apic->base_address;
        u32 val;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        /*
         * APIC registers must be aligned on a 128-bit boundary, and
         * 32/64/128-bit registers must be accessed as 32 bits.
         * See SDM section 8.4.1.
         */
        if (len != 4 || (offset & 0xf)) {
                /* Don't shout loud, $infamous_os would cause only noise. */
                apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
                return 0;
        }

        val = *(u32 *)data;

        /* too common printing */
        if (offset != APIC_EOI)
                apic_debug("%s: offset 0x%x with length 0x%x, and value is "
                           "0x%x\n", __func__, offset, len, val);

        kvm_lapic_reg_write(apic, offset & 0xff0, val);

        return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
        kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
        u32 val = 0;

        /* hw has done the conditional check and inst decode */
        offset &= 0xff0;

        kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

        /* TODO: optimize to just emulate side effect w/o one more write */
        kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!vcpu->arch.apic)
                return;

        hrtimer_cancel(&apic->lapic_timer.timer);

        if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
                static_key_slow_dec_deferred(&apic_hw_disabled);

        if (!apic->sw_enabled)
                static_key_slow_dec_deferred(&apic_sw_disabled);

        if (apic->regs)
                free_page((unsigned long)apic->regs);

        kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu) ||
            !apic_lvtt_tscdeadline(apic))
                return 0;

        return apic->lapic_timer.tscdeadline;
}

1948void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
1949{
1950 struct kvm_lapic *apic = vcpu->arch.apic;
1951
1952 if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1953 apic_lvtt_period(apic))
1954 return;
1955
1956 hrtimer_cancel(&apic->lapic_timer.timer);
1957 apic->lapic_timer.tscdeadline = data;
1958 start_apic_timer(apic);
1959}
1960
1961void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
1962{
1963 struct kvm_lapic *apic = vcpu->arch.apic;
1964
1965 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
1966 | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
1967}
1968
1969u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
1970{
1971 u64 tpr;
1972
1973 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
1974
1975 return (tpr & 0xf0) >> 4;
1976}
1977
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic)
		value |= MSR_IA32_APICBASE_BSP;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_key_slow_dec_deferred(&apic_hw_disabled);
		} else {
			static_key_slow_inc(&apic_hw_disabled.key);
			recalculate_apic_map(vcpu->kvm);
		}
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");

	/* with interrupts delivered over the FSB, APIC functionality can resume */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}

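/*
 * Reset the local APIC to its architectural power-up state.  For an INIT
 * (init_event == true) the APIC base and hence the xAPIC ID are preserved.
 */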
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!apic)
		return;

	apic_debug("%s\n", __func__);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event) {
		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
		                         MSR_IA32_APICBASE_ENABLE);
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = vcpu->arch.apicv_active;
	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu, -1);
		kvm_x86_ops->hwapic_isr_update(vcpu, -1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=0x%x, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_lapic_get_reg(apic, APIC_ID),
		   vcpu->arch.apic_base, apic->base_address);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

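/*
 * hrtimer callback for the emulated APIC timer: signal expiration and,
 * for a periodic timer, re-arm it for the next period.
 */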
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

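/*
 * Allocate and initialize the in-kernel local APIC, including its register
 * page and hrtimer.  Returns 0 on success, -ENOMEM on allocation failure.
 */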
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!apic_enabled(apic))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		if (apic_lvtt_oneshot(apic)) {
			apic->lapic_timer.tscdeadline = 0;
			apic->lapic_timer.target_expiration = 0;
		}
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

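/*
 * Translate APIC_ID/APIC_LDR between the xAPIC layout used by the
 * KVM_{GET,SET}_LAPIC ioctls and the x2APIC layout, honoring the
 * x2apic_format capability chosen by userspace.
 */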
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r)
		return r;
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

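/*
 * Handle a guest WRMSR to an x2APIC register.  The MSR index maps to the
 * xAPIC MMIO offset via (msr - APIC_BASE_MSR) << 4; since ICR2 is not a
 * separate x2APIC register, the high half of an ICR write is stored there
 * before the low half is written.
 */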
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is an ICR write, write the vector before the command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is an ICR write, write the vector before the command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

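/*
 * MSR_KVM_PV_EOI_EN handler: the payload is the 4-byte-aligned guest address
 * of the PV EOI flag, with the KVM_MSR_ENABLED bit selecting whether the
 * feature is active.
 */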
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}

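/*
 * Process pending INIT/SIPI events.  INITs are blocked while the vCPU is in
 * SMM, and a SIPI only takes effect while the vCPU is waiting in
 * KVM_MP_STATE_INIT_RECEIVED.
 */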
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			0x14UL
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following defines are not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
	*((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	__kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
	return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{
	__kvm_lapic_set_reg64(apic->regs, reg, val);
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * kvm->arch.apic_map_lock held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};

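/*
 * Rebuild the optimized APIC-ID-to-vCPU map.  The CLEAN /
 * UPDATE_IN_PROGRESS / DIRTY state machine above lets concurrent writers
 * mark the map dirty without holding the lock.
 */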
void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 max_id = 255; /* enough space for any xAPIC ID */

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
	 * (if clean) or the APIC registers (if dirty).
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
			   GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr;
		u8 xapic_id;
		u32 x2apic_id;

		if (!kvm_apic_present(vcpu))
			continue;

		xapic_id = kvm_xapic_id(apic);
		x2apic_id = kvm_x2apic_id(apic);

		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
				x2apic_id <= new->max_apic_id)
			new->phys_map[x2apic_id] = apic;
		/*
		 * ... the xAPIC IDs of vCPUs with an APIC ID > 0xff wrap
		 * around; prevent them from shadowing vCPUs with an
		 * APIC ID <= 0xff.
		 */
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;

		if (!kvm_apic_sw_enabled(apic))
			continue;

		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled)
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

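/*
 * x2APIC logical ID encoding: bits 31:16 hold the cluster (id >> 4) and
 * bits 15:0 hold a one-hot position within the cluster (1 << (id & 0xf)).
 */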
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
	return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = 0;

	if (!lapic_in_kernel(vcpu))
		return;

	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

	/*
	 * KVM's in-kernel IOAPIC emulates the 82093AA datasheet, which has
	 * no EOI register.  Some buggy OSes (e.g. Windows with the Hyper-V
	 * role) disable EOI broadcast in the LAPIC without checking the
	 * IOAPIC version first, so level-triggered interrupts would never
	 * get EOIed in the IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
		return;

	/* Initialize/mask any "new" LVT entries. */
	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

	apic->nr_lvt_entries = nr_lvt_entries;

	/* The number of LVT entries is reflected in the version register. */
	kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,      /* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

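/*
 * Merge the posted-interrupt request bitmap (PIR) into the virtual IRR.
 * Returns true if a bit was newly set in the IRR and it is also the
 * highest pending vector.
 */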
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		pir_val = READ_ONCE(pir[i]);
		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
		if (pir_val) {
			prev_irr_val = irr_val;
			irr_val |= xchg(&pir[i], 0);
			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
			if (prev_irr_val != irr_val) {
				max_updated_irr =
					__fls(irr_val ^ prev_irr_val) + vec;
			}
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	if (unlikely(apic->apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
							    apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * An ISR (in-service register) bit is set when an interrupt
		 * is injected.  Since the highest-priority vector is always
		 * the one injected, the most recently set bit matches the
		 * highest bit in the ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * We do get here with APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
		return;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		return false;

	val &= KVM_PV_EOI_ENABLED;

	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
		return false;

	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;
	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}

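/*
 * Recompute the processor priority: the PPR is the higher of the TPR and
 * the priority class of the highest in-service vector.  Returns true if
 * the PPR dropped, which may unmask a pending interrupt.
 */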
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	/*
	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
	 * were in x2APIC mode if the target APIC ID can't be encoded as an
	 * xAPIC ID.  This allows unique addressing of hotplugged vCPUs (which
	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
	 * mode.  Match the x2APIC ID if and only if the target APIC ID can't
	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
	 * changed its (addressable) xAPIC ID (which is writable).
	 */
	if (apic_x2apic_mode(apic) || mda > 0xff)
		return mda == kvm_x2apic_id(apic);

	return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			   int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

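/*
 * Vector hashing: pick the (vector % dest_vcpus)-th set bit in the
 * destination bitmap as the target index.
 */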
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
		       const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
				map->mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
		                     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {
			*r = 0;
			return true;
		}
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
						       trig_mode, vector);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		if (!kvm_inject_smi(vcpu)) {
			kvm_vcpu_kick(vcpu);
			result = 1;
		}
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled.  Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts.  It either uses kvm_apic_map_get_dest_lapic() to find
 * out the destination vcpus array and set the bitmap or it traverses to
 * each available vcpu to identify the same.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;
	int vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Nothing to do if the IOAPIC does not handle this vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

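/*
 * Emulate an EOI write: clear the highest in-service vector, recompute the
 * PPR, and propagate the EOI to the Hyper-V SynIC and the IOAPIC as needed.
 */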
static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit; one example is
	 * when the kernel checks the timer in setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (to_hv_vcpu(apic->vcpu) &&
	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * this interface assumes a trap-like exit, which has already finished the
 * desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

1337
1338void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1339{
1340 struct kvm_lapic_irq irq;
1341
1342 /* KVM has no delay and should always clear the BUSY/PENDING flag. */
1343 WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
1344
1345 irq.vector = icr_low & APIC_VECTOR_MASK;
1346 irq.delivery_mode = icr_low & APIC_MODE_MASK;
1347 irq.dest_mode = icr_low & APIC_DEST_MASK;
1348 irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1349 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1350 irq.shorthand = icr_low & APIC_SHORT_MASK;
1351 irq.msi_redir_hint = false;
1352 if (apic_x2apic_mode(apic))
1353 irq.dest_id = icr_high;
1354 else
1355 irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
1356
1357 trace_kvm_apic_ipi(icr_low, irq.dest_id);
1358
1359 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1360}
1361EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
1362
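/*
 * Compute the current count (TMCCT) from the time remaining until the
 * timer's target expiration: the remaining nanoseconds (modulo the period,
 * which matters for periodic timers) divided by the bus cycle time and the
 * divide ratio.  Illustrative example: with TMICT = 1000 and half of the
 * period elapsed, this returns roughly 500 regardless of the divide ratio.
 */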
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
	    apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			  (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

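/*
 * Read a 32-bit register from the virtual APIC page.  Most registers are
 * returned verbatim; PPR is recomputed on demand, TMCCT is derived from the
 * running timer, ARBPRI is unimplemented and reads as zero, and TPR reads
 * are reported to userspace when TPR access reporting is enabled.
 */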
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

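/*
 * Each APIC register occupies 16 bytes, so (reg >> 4) yields a unique bit
 * index per register, e.g. APIC_REG_MASK(APIC_TASKPRI) with APIC_TASKPRI ==
 * 0x80 is bit 8.  APIC_REGS_MASK() builds the mask for a run of consecutive
 * registers, such as the eight-register ISR/TMR/IRR banks.
 */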
#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))

static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	u64 valid_reg_mask =
		APIC_REG_MASK(APIC_ID) |
		APIC_REG_MASK(APIC_LVR) |
		APIC_REG_MASK(APIC_TASKPRI) |
		APIC_REG_MASK(APIC_PROCPRI) |
		APIC_REG_MASK(APIC_LDR) |
		APIC_REG_MASK(APIC_DFR) |
		APIC_REG_MASK(APIC_SPIV) |
		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
		APIC_REG_MASK(APIC_ESR) |
		APIC_REG_MASK(APIC_ICR) |
		APIC_REG_MASK(APIC_LVTT) |
		APIC_REG_MASK(APIC_LVTTHMR) |
		APIC_REG_MASK(APIC_LVTPC) |
		APIC_REG_MASK(APIC_LVT0) |
		APIC_REG_MASK(APIC_LVT1) |
		APIC_REG_MASK(APIC_LVTERR) |
		APIC_REG_MASK(APIC_TMICT) |
		APIC_REG_MASK(APIC_TMCCT) |
		APIC_REG_MASK(APIC_TDCR);

	if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
		valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);

	/*
	 * ARBPRI and ICR2 are not valid in x2APIC mode.  WARN if KVM reads ICR
	 * in x2APIC mode as it's an 8-byte register in x2APIC and needs to be
	 * manually handled by the caller.
	 */
	if (!apic_x2apic_mode(apic))
		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
				  APIC_REG_MASK(APIC_ICR2);
	else
		WARN_ON_ONCE(offset == APIC_ICR);

	if (alignment + len > 4)
		return 1;

	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
		return 1;

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1, 2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			  gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}

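/*
 * Decode the timer divide configuration.  Bits 0, 1 and 3 of TDCR select a
 * divisor of 2^(N+1), with the all-ones pattern wrapping around to
 * divide-by-1: e.g. TDCR = 0xb (0b1011) gives N = 7, and (7 + 1) & 7 == 0,
 * i.e. a divide_count of 1.
 */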
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);
}

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	/*
	 * Do not allow the guest to program periodic timers with a small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (apic->lapic_timer.period < min_period) {
			pr_info_ratelimited(
			    "kvm: vcpu %i: requested %lld ns "
			    "lapic timer period limited to %lld ns\n",
			    apic->vcpu->vcpu_id,
			    apic->lapic_timer.period, min_period);
			apic->lapic_timer.period = min_period;
		}
	}
}

static void cancel_hv_timer(struct kvm_lapic *apic);

static void cancel_apic_timer(struct kvm_lapic *apic)
{
	hrtimer_cancel(&apic->lapic_timer.timer);
	preempt_disable();
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	preempt_enable();
	atomic_set(&apic->lapic_timer.pending, 0);
}

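/*
 * Propagate an LVTT write into the cached timer mode.  Switching between the
 * count-based modes and TSC-deadline mode cancels the running timer and
 * clears the now-stale TMICT/period/deadline state.
 */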
static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
				APIC_LVT_TIMER_TSCDEADLINE)) {
			cancel_apic_timer(apic);
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
			apic->lapic_timer.period = 0;
			apic->lapic_timer.tscdeadline = 0;
		}
		apic->lapic_timer.timer_mode = timer_mode;
		limit_periodic_timer_frequency(apic);
	}
}

1602/*
1603 * On APICv, this test will cause a busy wait
1604 * during a higher-priority task.
1605 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (apic->apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

	/*
	 * If the guest TSC is running at a different ratio than the host, then
	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
	 * always for VMX enabled hardware.
	 */
	if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
		__delay(min(guest_cycles,
			nsec_to_cycles(vcpu, timer_advance_ns)));
	} else {
		u64 delay_ns = guest_cycles * 1000000ULL;
		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
		ndelay(min_t(u32, delay_ns, timer_advance_ns));
	}
}

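/*
 * Nudge timer_advance_ns toward the observed error: convert the guest-TSC
 * delta to nanoseconds and apply 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP of it,
 * decreasing the advance when the timer fired early (negative delta) and
 * increasing it when the timer fired late.
 */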
static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
					      s64 advance_expire_delta)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
	u64 ns;

	/* Do not adjust for tiny fluctuations or large random spikes. */
	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
		return;

	/* too early */
	if (advance_expire_delta < 0) {
		ns = -advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	} else {
		/* too late */
		ns = advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	}

	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}

static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	if (lapic_timer_advance_dynamic) {
		adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
		/*
		 * If the timer fired early, reread the TSC to account for the
		 * overhead of the above adjustment to avoid waiting longer
		 * than is necessary.
		 */
		if (guest_tsc < tsc_deadline)
			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	}

	if (guest_tsc < tsc_deadline)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
}

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);

static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	kvm_apic_local_deliver(apic, APIC_LVTT);
	if (apic_lvtt_tscdeadline(apic)) {
		ktimer->tscdeadline = 0;
	} else if (apic_lvtt_oneshot(apic)) {
		ktimer->tscdeadline = 0;
		ktimer->target_expiration = 0;
	}
}

static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
		ktimer->expired_tscdeadline = ktimer->tscdeadline;

	if (!from_timer_fn && apic->apicv_active) {
		WARN_ON(kvm_get_running_vcpu() != vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
		/*
		 * Ensure the guest's timer has truly expired before posting an
		 * interrupt.  Open code the relevant checks to avoid querying
		 * lapic_timer_int_injected(), which will be false since the
		 * interrupt isn't yet injected.  Waiting until after injecting
		 * is not an option since that won't help a posted interrupt.
		 */
		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
			__kvm_wait_lapic_expire(vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	atomic_inc(&apic->lapic_timer.pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	if (from_timer_fn)
		kvm_vcpu_kick(vcpu);
}

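/*
 * Program an hrtimer for TSC-deadline mode: convert the distance from the
 * current guest TSC to the deadline into nanoseconds (delta * 10^6 /
 * tsc_khz), subtract the expiry advance, and arm the timer.  Deadlines in
 * the past, or closer than the advance, expire immediately.
 */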
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = ktime_get();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	ns = (tscdeadline - guest_tsc) * 1000000ULL;
	do_div(ns, this_tsc_khz);

	if (likely(tscdeadline > guest_tsc) &&
	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else {
		apic_timer_expired(apic, false);
	}

	local_irq_restore(flags);
}

static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
{
	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
}

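/*
 * A TDCR write changed the divide ratio mid-countdown.  Rescale the time
 * remaining by new/old divisor and move both the hrtimer target and the TSC
 * deadline, so the current count is preserved under the new frequency.
 */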
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
	ktime_t now, remaining;
	u64 ns_remaining_old, ns_remaining_new;

	apic->lapic_timer.period =
		tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
	limit_periodic_timer_frequency(apic);

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns_remaining_old = ktime_to_ns(remaining);
	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
					   apic->divide_count, old_divisor);

	apic->lapic_timer.tscdeadline +=
		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
		nsec_to_cycles(apic->vcpu, ns_remaining_old);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
}

static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
{
	ktime_t now;
	u64 tscl = rdtsc();
	s64 deadline;

	now = ktime_get();
	apic->lapic_timer.period =
		tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));

	if (!apic->lapic_timer.period) {
		apic->lapic_timer.tscdeadline = 0;
		return false;
	}

	limit_periodic_timer_frequency(apic);
	deadline = apic->lapic_timer.period;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		if (unlikely(count_reg != APIC_TMICT)) {
			deadline = tmict_to_ns(apic,
				     kvm_lapic_get_reg(apic, count_reg));
			if (unlikely(deadline <= 0))
				deadline = apic->lapic_timer.period;
			else if (unlikely(deadline > apic->lapic_timer.period)) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested lapic timer restore with "
				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
				    "Using initial count to start timer.\n",
				    apic->vcpu->vcpu_id,
				    count_reg,
				    kvm_lapic_get_reg(apic, count_reg),
				    deadline, apic->lapic_timer.period);
				kvm_lapic_set_reg(apic, count_reg, 0);
				deadline = apic->lapic_timer.period;
			}
		}
	}

	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, deadline);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);

	return true;
}

static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
	ktime_t now = ktime_get();
	u64 tscl = rdtsc();
	ktime_t delta;

	/*
	 * Synchronize both deadlines to the same time source or
	 * differences in the periods (caused by differences in the
	 * underlying clocks or numerical approximation errors) will
	 * cause the two to drift apart over time as the errors
	 * accumulate.
	 */
	apic->lapic_timer.target_expiration =
		ktime_add_ns(apic->lapic_timer.target_expiration,
				apic->lapic_timer.period);
	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, delta);
}

static void start_sw_period(struct kvm_lapic *apic)
{
	if (!apic->lapic_timer.period)
		return;

	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {
		apic_timer_expired(apic, false);

		if (apic_lvtt_oneshot(apic))
			return;

		advance_periodic_target_expiration(apic);
	}

	hrtimer_start(&apic->lapic_timer.timer,
		apic->lapic_timer.target_expiration,
		HRTIMER_MODE_ABS_HARD);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return false;

	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}

static void cancel_hv_timer(struct kvm_lapic *apic)
{
	WARN_ON(preemptible());
	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

static bool start_hv_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	struct kvm_vcpu *vcpu = apic->vcpu;
	bool expired;

	WARN_ON(preemptible());
	if (!kvm_can_use_hv_timer(vcpu))
		return false;

	if (!ktimer->tscdeadline)
		return false;

	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
		return false;

	ktimer->hv_timer_in_use = true;
	hrtimer_cancel(&ktimer->timer);

	/*
	 * To simplify handling the periodic timer, leave the hv timer running
	 * even if the deadline timer has expired, i.e. rely on the resulting
	 * VM-Exit to recompute the periodic timer's target expiration.
	 */
	if (!apic_lvtt_period(apic)) {
		/*
		 * Cancel the hv timer if the sw timer fired while the hv timer
		 * was being programmed, or if the hv timer itself expired.
		 */
		if (atomic_read(&ktimer->pending)) {
			cancel_hv_timer(apic);
		} else if (expired) {
			apic_timer_expired(apic, false);
			cancel_hv_timer(apic);
		}
	}

	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);

	return true;
}

static void start_sw_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	WARN_ON(preemptible());
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
		return;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
		start_sw_period(apic);
	else if (apic_lvtt_tscdeadline(apic))
		start_sw_tscdeadline(apic);
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}

static void restart_apic_timer(struct kvm_lapic *apic)
{
	preempt_disable();

	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
		goto out;

	if (!start_hv_timer(apic))
		start_sw_timer(apic);
out:
	preempt_enable();
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* If the preempt notifier has already run, it also called apic_timer_expired */
	if (!apic->lapic_timer.hv_timer_in_use)
		goto out;
	WARN_ON(kvm_vcpu_is_blocking(vcpu));
	apic_timer_expired(apic, false);
	cancel_hv_timer(apic);

	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		advance_periodic_target_expiration(apic);
		restart_apic_timer(apic);
	}
out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	restart_apic_timer(vcpu->arch.apic);
}

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* Possibly the TSC deadline timer is not enabled yet */
	if (apic->lapic_timer.hv_timer_in_use)
		start_sw_timer(apic);
	preempt_enable();
}

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	restart_apic_timer(apic);
}

static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
{
	atomic_set(&apic->lapic_timer.pending, 0);

	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
	    && !set_target_expiration(apic, count_reg))
		return;

	restart_apic_timer(apic);
}

static void start_apic_timer(struct kvm_lapic *apic)
{
	__start_apic_timer(apic, APIC_TMICT);
}

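/*
 * Track, per VM, how many vAPICs have LINT0 programmed for NMI delivery;
 * the counter lets other emulation code (e.g. the in-kernel PIT) cheaply
 * check whether any vCPU expects NMI-mode watchdog delivery.
 */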
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode)
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}

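/*
 * Hardware-accelerated APIC ID lookups assume the xAPIC ID equals vcpu_id,
 * so a guest write that moves the ID away from the default forces APICv to
 * be inhibited for the whole VM.
 */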
static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
{
	struct kvm *kvm = apic->vcpu->kvm;

	if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
		return;

	if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
		return;

	kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
}

static int get_lvt_index(u32 reg)
{
	if (reg == APIC_LVTCMCI)
		return LVT_CMCI;
	if (reg < APIC_LVTT || reg > APIC_LVTERR)
		return -1;
	return array_index_nospec(
			(reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
}

static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic)) {
			kvm_apic_set_xapic_id(apic, val >> 24);
			kvm_lapic_xapic_id_updated(apic);
		} else {
			ret = 1;
		}
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
		else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;

		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;

			for (i = 0; i < apic->nr_lvt_entries; i++) {
				kvm_lapic_set_reg(apic, APIC_LVTx(i),
					kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
		break;
	}
	case APIC_ICR:
		WARN_ON_ONCE(apic_x2apic_mode(apic));

		/* No delay here, so we always clear the pending bit */
		val &= ~APIC_ICR_BUSY;
		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
		kvm_lapic_set_reg(apic, APIC_ICR, val);
		break;
	case APIC_ICR2:
		if (apic_x2apic_mode(apic))
			ret = 1;
		else
			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		fallthrough;
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_LVTCMCI: {
		u32 index = get_lvt_index(reg);

		if (!kvm_lapic_lvt_supported(apic, index)) {
			ret = 1;
			break;
		}
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);
		break;
	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		cancel_apic_timer(apic);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR: {
		uint32_t old_divisor = apic->divide_count;

		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
		    apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			update_target_expiration(apic, old_divisor);
			restart_apic_timer(apic);
		}
		break;
	}
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0)
			ret = 1;
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic))
			kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
		else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}

	/*
	 * Recalculate APIC maps if necessary, e.g. if the software enable bit
	 * was toggled, the APIC ID changed, etc...  The maps are marked dirty
	 * on relevant changes, i.e. this is a nop for most writes.
	 */
	kvm_recalculate_apic_map(apic->vcpu->kvm);

	return ret;
}

static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers must be aligned on a 128-bit boundary, and
	 * 32/64/128-bit registers must be accessed through 32-bit reads and
	 * writes.  See SDM section 8.4.1.
	 */
	if (len != 4 || (offset & 0xf))
		return 0;

	val = *(u32 *)data;

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 val;

	if (apic_x2apic_mode(apic)) {
		if (KVM_BUG_ON(kvm_lapic_msr_read(apic, offset, &val), vcpu->kvm))
			return;
	} else {
		val = kvm_lapic_get_reg(apic, offset);
	}

	/*
	 * ICR is a single 64-bit register when x2APIC is enabled.  For legacy
	 * xAPIC, ICR writes need to go down the common (slightly slower) path
	 * to get the upper half from ICR2.
	 */
	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
		trace_kvm_apic_write(APIC_ICR, val);
	} else {
		/* TODO: optimize to just emulate side effect w/o one more write */
		kvm_lapic_reg_write(apic, offset, (u32)val);
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_branch_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_branch_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
		kvm_vcpu_update_apicv(vcpu);
		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	    apic->base_address != APIC_DEFAULT_PHYS_BASE) {
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
	}
}

void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic->apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		/*
		 * Don't clear irr_pending, searching the IRR can race with
		 * updates from the CPU as APICv is still active from hardware's
		 * perspective.  The flag will be cleared as appropriate when
		 * KVM injects the interrupt.
		 */
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 msr_val;
	int i;

	if (!init_event) {
		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			msr_val |= MSR_IA32_APICBASE_BSP;
		kvm_lapic_set_base(vcpu, msr_val);
	}

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
	if (!init_event)
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < apic->nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
				  SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	if (!apic_x2apic_mode(apic)) {
		kvm_lapic_set_reg(apic, APIC_ICR, 0);
		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	} else {
		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
	}
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);

	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					 NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read = apic_mmio_read,
	.write = apic_mmio_write,
};

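/*
 * hrtimer callback, runs in hardirq context.  One-shot and TSC-deadline
 * timers fire once; periodic timers advance the target expiration and
 * re-arm themselves by returning HRTIMER_RESTART.
 */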
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "failed to allocate APIC register page for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	/*
	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		return 1;
	return 0;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because doing so would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

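/*
 * Convert between the in-kernel and userspace views of x2APIC state.
 * Unless the VM opted into the raw x2APIC format (x2apic_format), userspace
 * keeps the ID in xAPIC layout (shifted into bits 31:24), and the 64-bit
 * ICR is always mirrored across the legacy ICR/ICR2 pair.
 */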
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
		u64 icr;

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/*
		 * In x2APIC mode, the LDR is fixed and derived from the ID,
		 * and ICR is internally a single 64-bit register that must be
		 * split into ICR+ICR2 in userspace for backwards
		 * compatibility.
		 */
		if (set) {
			*ldr = kvm_apic_calc_x2apic_ldr(*id);

			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
		} else {
			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
		}
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	if (!apic_x2apic_mode(apic))
		kvm_lapic_xapic_id_updated(apic);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	cancel_apic_timer(apic);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
	    kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

2803/*
2804 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2805 *
2806 * Detect whether guest triggered PV EOI since the
2807 * last entry. If yes, set EOI on guests's behalf.
2808 * Clear PV EOI in guest memory in any case.
2809 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	int vector;

	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));

	if (pv_eoi_test_and_clr_pending(vcpu))
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
				      struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.apic->vapic_cache,
					      vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

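/*
 * Handle a 64-bit ICR write from x2APIC MSR or Hyper-V PV vAPIC context:
 * the delivery-status (busy) bit is never emulated, so it is cleared
 * unconditionally before the IPI is sent and the value is stored.
 */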
int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{
	data &= ~APIC_ICR_BUSY;

	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
	trace_kvm_apic_write(APIC_ICR, data);
	return 0;
}

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
	u32 low;

	if (reg == APIC_ICR) {
		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
		return 0;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;

	*data = low;

	return 0;
}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{
	/*
	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such; all other registers remain accessible only
	 * through 32-bit reads and writes.
	 */
	if (reg == APIC_ICR)
		return kvm_x2apic_icr_write(apic, data);

	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_write(apic, reg, data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR)
		return 1;

	return kvm_lapic_msr_read(apic, reg, data);
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
}

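/*
 * MSR_KVM_PV_EOI_EN layout: bit 0 (KVM_MSR_ENABLED) turns the feature on,
 * the remaining bits hold the guest address of the PV EOI flag, which must
 * be 4-byte aligned.
 */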
int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;
	int ret;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;
		else
			new_len = len;

		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
		if (ret)
			return ret;
	}

	vcpu->arch.pv_eoi.msr_val = data;

	return 0;
}

int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;

	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * Continue processing INIT/SIPI even if a nested VM-Exit
		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
		 * are blocked as a result of transitioning to VMX root mode.
		 */
	}

	/*
	 * INITs are blocked while CPU is in specific states (SMM, VMX root
	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
	 * wait-for-SIPI (WFS).
	 */
	if (!kvm_apic_init_sipi_allowed(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
	static_key_deferred_flush(&apic_sw_disabled);
	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}