1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * KVM paravirt_ops implementation
4 *
5 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 * Copyright IBM Corporation, 2007
7 * Authors: Anthony Liguori <aliguori@us.ibm.com>
8 */
9
10#include <linux/context_tracking.h>
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/kvm_para.h>
14#include <linux/cpu.h>
15#include <linux/mm.h>
16#include <linux/highmem.h>
17#include <linux/hardirq.h>
18#include <linux/notifier.h>
19#include <linux/reboot.h>
20#include <linux/hash.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/kprobes.h>
24#include <linux/debugfs.h>
25#include <linux/nmi.h>
26#include <linux/swait.h>
27#include <asm/timer.h>
28#include <asm/cpu.h>
29#include <asm/traps.h>
30#include <asm/desc.h>
31#include <asm/tlbflush.h>
32#include <asm/apic.h>
33#include <asm/apicdef.h>
34#include <asm/hypervisor.h>
35#include <asm/tlb.h>
36
37static int kvmapf = 1;
38
39static int __init parse_no_kvmapf(char *arg)
40{
41 kvmapf = 0;
42 return 0;
43}
44
45early_param("no-kvmapf", parse_no_kvmapf);
46
47static int steal_acc = 1;
48static int __init parse_no_stealacc(char *arg)
49{
50 steal_acc = 0;
51 return 0;
52}
53
54early_param("no-steal-acc", parse_no_stealacc);
55
56static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
57DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
58static int has_steal_clock = 0;
59
60/*
61 * No need for any "IO delay" on KVM
62 */
63static void kvm_io_delay(void)
64{
65}
66
67#define KVM_TASK_SLEEP_HASHBITS 8
68#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
69
70struct kvm_task_sleep_node {
71 struct hlist_node link;
72 struct swait_queue_head wq;
73 u32 token;
74 int cpu;
75 bool halted;
76};
77
78static struct kvm_task_sleep_head {
79 raw_spinlock_t lock;
80 struct hlist_head list;
81} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
82
83static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
84 u32 token)
85{
86 struct hlist_node *p;
87
88 hlist_for_each(p, &b->list) {
89 struct kvm_task_sleep_node *n =
90 hlist_entry(p, typeof(*n), link);
91 if (n->token == token)
92 return n;
93 }
94
95 return NULL;
96}
97
98/*
99 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
100 * (other than user space)?
101 */
102void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
103{
104 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
105 struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
106 struct kvm_task_sleep_node n, *e;
107 DECLARE_SWAITQUEUE(wait);
108
109 rcu_irq_enter();
110
111 raw_spin_lock(&b->lock);
112 e = _find_apf_task(b, token);
113 if (e) {
114 /* dummy entry exist -> wake up was delivered ahead of PF */
115 hlist_del(&e->link);
116 kfree(e);
117 raw_spin_unlock(&b->lock);
118
119 rcu_irq_exit();
120 return;
121 }
122
123 n.token = token;
124 n.cpu = smp_processor_id();
125 n.halted = is_idle_task(current) ||
126 (IS_ENABLED(CONFIG_PREEMPT_COUNT)
127 ? preempt_count() > 1 || rcu_preempt_depth()
128 : interrupt_kernel);
129 init_swait_queue_head(&n.wq);
130 hlist_add_head(&n.link, &b->list);
131 raw_spin_unlock(&b->lock);
132
133 for (;;) {
134 if (!n.halted)
135 prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
136 if (hlist_unhashed(&n.link))
137 break;
138
139 rcu_irq_exit();
140
141 if (!n.halted) {
142 local_irq_enable();
143 schedule();
144 local_irq_disable();
145 } else {
146 /*
147 * We cannot reschedule. So halt.
148 */
149 native_safe_halt();
150 local_irq_disable();
151 }
152
153 rcu_irq_enter();
154 }
155 if (!n.halted)
156 finish_swait(&n.wq, &wait);
157
158 rcu_irq_exit();
159 return;
160}
161EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
162
163static void apf_task_wake_one(struct kvm_task_sleep_node *n)
164{
165 hlist_del_init(&n->link);
166 if (n->halted)
167 smp_send_reschedule(n->cpu);
168 else if (swq_has_sleeper(&n->wq))
169 swake_up_one(&n->wq);
170}
171
172static void apf_task_wake_all(void)
173{
174 int i;
175
176 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
177 struct hlist_node *p, *next;
178 struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
179 raw_spin_lock(&b->lock);
180 hlist_for_each_safe(p, next, &b->list) {
181 struct kvm_task_sleep_node *n =
182 hlist_entry(p, typeof(*n), link);
183 if (n->cpu == smp_processor_id())
184 apf_task_wake_one(n);
185 }
186 raw_spin_unlock(&b->lock);
187 }
188}
189
190void kvm_async_pf_task_wake(u32 token)
191{
192 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
193 struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
194 struct kvm_task_sleep_node *n;
195
196 if (token == ~0) {
197 apf_task_wake_all();
198 return;
199 }
200
201again:
202 raw_spin_lock(&b->lock);
203 n = _find_apf_task(b, token);
204 if (!n) {
205 /*
206 * async PF was not yet handled.
207 * Add dummy entry for the token.
208 */
209 n = kzalloc(sizeof(*n), GFP_ATOMIC);
210 if (!n) {
211 /*
212 * Allocation failed! Busy wait while other cpu
213 * handles async PF.
214 */
215 raw_spin_unlock(&b->lock);
216 cpu_relax();
217 goto again;
218 }
219 n->token = token;
220 n->cpu = smp_processor_id();
221 init_swait_queue_head(&n->wq);
222 hlist_add_head(&n->link, &b->list);
223 } else
224 apf_task_wake_one(n);
225 raw_spin_unlock(&b->lock);
226 return;
227}
228EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
229
230u32 kvm_read_and_reset_pf_reason(void)
231{
232 u32 reason = 0;
233
234 if (__this_cpu_read(apf_reason.enabled)) {
235 reason = __this_cpu_read(apf_reason.reason);
236 __this_cpu_write(apf_reason.reason, 0);
237 }
238
239 return reason;
240}
241EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
242NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
243
244dotraplinkage void
245do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
246{
247 enum ctx_state prev_state;
248
249 switch (kvm_read_and_reset_pf_reason()) {
250 default:
251 do_page_fault(regs, error_code, address);
252 break;
253 case KVM_PV_REASON_PAGE_NOT_PRESENT:
254 /* page is swapped out by the host. */
255 prev_state = exception_enter();
256 kvm_async_pf_task_wait((u32)address, !user_mode(regs));
257 exception_exit(prev_state);
258 break;
259 case KVM_PV_REASON_PAGE_READY:
260 rcu_irq_enter();
261 kvm_async_pf_task_wake((u32)address);
262 rcu_irq_exit();
263 break;
264 }
265}
266NOKPROBE_SYMBOL(do_async_page_fault);
267
268static void __init paravirt_ops_setup(void)
269{
270 pv_info.name = "KVM";
271
272 if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
273 pv_ops.cpu.io_delay = kvm_io_delay;
274
275#ifdef CONFIG_X86_IO_APIC
276 no_timer_check = 1;
277#endif
278}
279
280static void kvm_register_steal_time(void)
281{
282 int cpu = smp_processor_id();
283 struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
284
285 if (!has_steal_clock)
286 return;
287
288 wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
289 pr_info("kvm-stealtime: cpu %d, msr %llx\n",
290 cpu, (unsigned long long) slow_virt_to_phys(st));
291}
292
293static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
294
295static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
296{
297 /**
298 * This relies on __test_and_clear_bit to modify the memory
299 * in a way that is atomic with respect to the local CPU.
300 * The hypervisor only accesses this memory from the local CPU so
301 * there's no need for lock or memory barriers.
302 * An optimization barrier is implied in apic write.
303 */
304 if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
305 return;
306 apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
307}
308
309static void kvm_guest_cpu_init(void)
310{
311 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
312 u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
313
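	/*
	 * With a preemptible kernel the async #PF handler can reschedule even
	 * when the fault hits kernel mode, so ask the host to send
	 * "page not present" notifications while the vCPU runs in CPL0 too.
	 */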
314#ifdef CONFIG_PREEMPTION
315 pa |= KVM_ASYNC_PF_SEND_ALWAYS;
316#endif
317 pa |= KVM_ASYNC_PF_ENABLED;
318
319 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
320 pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
321
322 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
323 __this_cpu_write(apf_reason.enabled, 1);
324 printk(KERN_INFO"KVM setup async PF for cpu %d\n",
325 smp_processor_id());
326 }
327
328 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
329 unsigned long pa;
330 /* Size alignment is implied but just to make it explicit. */
331 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
332 __this_cpu_write(kvm_apic_eoi, 0);
333 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
334 | KVM_MSR_ENABLED;
335 wrmsrl(MSR_KVM_PV_EOI_EN, pa);
336 }
337
338 if (has_steal_clock)
339 kvm_register_steal_time();
340}
341
342static void kvm_pv_disable_apf(void)
343{
344 if (!__this_cpu_read(apf_reason.enabled))
345 return;
346
347 wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
348 __this_cpu_write(apf_reason.enabled, 0);
349
350 printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
351 smp_processor_id());
352}
353
354static void kvm_pv_guest_cpu_reboot(void *unused)
355{
356 /*
357 * We disable PV EOI before we load a new kernel by kexec,
358 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
359 * New kernel can re-enable when it boots.
360 */
361 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
362 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
363 kvm_pv_disable_apf();
364 kvm_disable_steal_time();
365}
366
367static int kvm_pv_reboot_notify(struct notifier_block *nb,
368 unsigned long code, void *unused)
369{
370 if (code == SYS_RESTART)
371 on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
372 return NOTIFY_DONE;
373}
374
375static struct notifier_block kvm_pv_reboot_nb = {
376 .notifier_call = kvm_pv_reboot_notify,
377};
378
379static u64 kvm_steal_clock(int cpu)
380{
381 u64 steal;
382 struct kvm_steal_time *src;
383 int version;
384
385 src = &per_cpu(steal_time, cpu);
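	/*
	 * The host bumps ->version before and after updating ->steal, so an
	 * odd value or a value that changed across the reads means an update
	 * was in flight; retry until a stable snapshot is observed.
	 */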
386 do {
387 version = src->version;
388 virt_rmb();
389 steal = src->steal;
390 virt_rmb();
391 } while ((version & 1) || (version != src->version));
392
393 return steal;
394}
395
396void kvm_disable_steal_time(void)
397{
398 if (!has_steal_clock)
399 return;
400
401 wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
402}
403
404static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
405{
406 early_set_memory_decrypted((unsigned long) ptr, size);
407}
408
409/*
410 * Iterate through all possible CPUs and map the memory region pointed
411 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
412 *
413 * Note: we iterate through all possible CPUs to ensure that CPUs
414 * hotplugged will have their per-cpu variable already mapped as
415 * decrypted.
416 */
417static void __init sev_map_percpu_data(void)
418{
419 int cpu;
420
421 if (!sev_active())
422 return;
423
424 for_each_possible_cpu(cpu) {
425 __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
426 __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
427 __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
428 }
429}
430
431#ifdef CONFIG_SMP
432#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
433
434static void __send_ipi_mask(const struct cpumask *mask, int vector)
435{
436 unsigned long flags;
437 int cpu, apic_id, icr;
438 int min = 0, max = 0;
439#ifdef CONFIG_X86_64
440 __uint128_t ipi_bitmap = 0;
441#else
442 u64 ipi_bitmap = 0;
443#endif
444 long ret;
445
446 if (cpumask_empty(mask))
447 return;
448
449 local_irq_save(flags);
450
451 switch (vector) {
452 default:
453 icr = APIC_DM_FIXED | vector;
454 break;
455 case NMI_VECTOR:
456 icr = APIC_DM_NMI;
457 break;
458 }
459
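	/*
	 * Pack target APIC IDs into a bitmap anchored at 'min' (bit 0 == min).
	 * Whenever the next APIC ID would not fit into the 2*BITS_PER_LONG
	 * window, flush the accumulated bitmap with KVM_HC_SEND_IPI and start
	 * a new cluster.
	 */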
460 for_each_cpu(cpu, mask) {
461 apic_id = per_cpu(x86_cpu_to_apicid, cpu);
462 if (!ipi_bitmap) {
463 min = max = apic_id;
464 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
465 ipi_bitmap <<= min - apic_id;
466 min = apic_id;
467 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
468 max = apic_id < max ? max : apic_id;
469 } else {
470 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
471 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
472 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
473 min = max = apic_id;
474 ipi_bitmap = 0;
475 }
476 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
477 }
478
479 if (ipi_bitmap) {
480 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
481 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
482 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
483 }
484
485 local_irq_restore(flags);
486}
487
488static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
489{
490 __send_ipi_mask(mask, vector);
491}
492
493static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
494{
495 unsigned int this_cpu = smp_processor_id();
496 struct cpumask new_mask;
497 const struct cpumask *local_mask;
498
499 cpumask_copy(&new_mask, mask);
500 cpumask_clear_cpu(this_cpu, &new_mask);
501 local_mask = &new_mask;
502 __send_ipi_mask(local_mask, vector);
503}
504
505/*
506 * Set the IPI entry points
507 */
508static void kvm_setup_pv_ipi(void)
509{
510 apic->send_IPI_mask = kvm_send_ipi_mask;
511 apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
512 pr_info("KVM setup pv IPIs\n");
513}
514
515static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
516{
517 int cpu;
518
519 native_send_call_func_ipi(mask);
520
521 /* Make sure other vCPUs get a chance to run if they need to. */
522 for_each_cpu(cpu, mask) {
523 if (vcpu_is_preempted(cpu)) {
524 kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
525 break;
526 }
527 }
528}
529
530static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
531{
532 native_smp_prepare_cpus(max_cpus);
533 if (kvm_para_has_hint(KVM_HINTS_REALTIME))
534 static_branch_disable(&virt_spin_lock_key);
535}
536
537static void __init kvm_smp_prepare_boot_cpu(void)
538{
539 /*
540 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
541 * shares the guest physical address with the hypervisor.
542 */
543 sev_map_percpu_data();
544
545 kvm_guest_cpu_init();
546 native_smp_prepare_boot_cpu();
547 kvm_spinlock_init();
548}
549
550static void kvm_guest_cpu_offline(void)
551{
552 kvm_disable_steal_time();
553 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
554 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
555 kvm_pv_disable_apf();
556 apf_task_wake_all();
557}
558
559static int kvm_cpu_online(unsigned int cpu)
560{
561 local_irq_disable();
562 kvm_guest_cpu_init();
563 local_irq_enable();
564 return 0;
565}
566
567static int kvm_cpu_down_prepare(unsigned int cpu)
568{
569 local_irq_disable();
570 kvm_guest_cpu_offline();
571 local_irq_enable();
572 return 0;
573}
574#endif
575
576static void __init kvm_apf_trap_init(void)
577{
578 update_intr_gate(X86_TRAP_PF, async_page_fault);
579}
580
581static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
582
583static void kvm_flush_tlb_others(const struct cpumask *cpumask,
584 const struct flush_tlb_info *info)
585{
586 u8 state;
587 int cpu;
588 struct kvm_steal_time *src;
589 struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
590
591 cpumask_copy(flushmask, cpumask);
592 /*
593 * We have to call flush only on online vCPUs. And
594 * queue flush_on_enter for pre-empted vCPUs
595 */
596 for_each_cpu(cpu, flushmask) {
597 src = &per_cpu(steal_time, cpu);
598 state = READ_ONCE(src->preempted);
599 if ((state & KVM_VCPU_PREEMPTED)) {
600 if (try_cmpxchg(&src->preempted, &state,
601 state | KVM_VCPU_FLUSH_TLB))
602 __cpumask_clear_cpu(cpu, flushmask);
603 }
604 }
605
606 native_flush_tlb_others(flushmask, info);
607}
608
609static void __init kvm_guest_init(void)
610{
611 int i;
612
613 paravirt_ops_setup();
614 register_reboot_notifier(&kvm_pv_reboot_nb);
615 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
616 raw_spin_lock_init(&async_pf_sleepers[i].lock);
617 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
618 x86_init.irqs.trap_init = kvm_apf_trap_init;
619
620 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
621 has_steal_clock = 1;
622 pv_ops.time.steal_clock = kvm_steal_clock;
623 }
624
625 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
626 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
627 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
628 pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
629 pv_ops.mmu.tlb_remove_table = tlb_remove_table;
630 }
631
632 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
633 apic_set_eoi_write(kvm_guest_apic_eoi_write);
634
635#ifdef CONFIG_SMP
636 smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
637 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
638 if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
639 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
640 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
641 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
642 pr_info("KVM setup pv sched yield\n");
643 }
644 if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
645 kvm_cpu_online, kvm_cpu_down_prepare) < 0)
646 pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
647#else
648 sev_map_percpu_data();
649 kvm_guest_cpu_init();
650#endif
651
652 /*
653 * Hard lockup detection is enabled by default. Disable it, as guests
654 * can get false positives too easily, for example if the host is
655 * overcommitted.
656 */
657 hardlockup_detector_disable();
658}
659
660static noinline uint32_t __kvm_cpuid_base(void)
661{
662 if (boot_cpu_data.cpuid_level < 0)
663 return 0; /* So we don't blow up on old processors */
664
665 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
666 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
667
668 return 0;
669}
670
671static inline uint32_t kvm_cpuid_base(void)
672{
673 static int kvm_cpuid_base = -1;
674
675 if (kvm_cpuid_base == -1)
676 kvm_cpuid_base = __kvm_cpuid_base();
677
678 return kvm_cpuid_base;
679}
680
681bool kvm_para_available(void)
682{
683 return kvm_cpuid_base() != 0;
684}
685EXPORT_SYMBOL_GPL(kvm_para_available);
686
687unsigned int kvm_arch_para_features(void)
688{
689 return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
690}
691
692unsigned int kvm_arch_para_hints(void)
693{
694 return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
695}
696EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
697
698static uint32_t __init kvm_detect(void)
699{
700 return kvm_cpuid_base();
701}
702
703static void __init kvm_apic_init(void)
704{
705#if defined(CONFIG_SMP)
706 if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
707 kvm_setup_pv_ipi();
708#endif
709}
710
711static void __init kvm_init_platform(void)
712{
713 kvmclock_init();
714 x86_platform.apic_post_init = kvm_apic_init;
715}
716
717const __initconst struct hypervisor_x86 x86_hyper_kvm = {
718 .name = "KVM",
719 .detect = kvm_detect,
720 .type = X86_HYPER_KVM,
721 .init.guest_late_init = kvm_guest_init,
722 .init.x2apic_available = kvm_para_available,
723 .init.init_platform = kvm_init_platform,
724};
725
726static __init int activate_jump_labels(void)
727{
728 if (has_steal_clock) {
729 static_key_slow_inc(&paravirt_steal_enabled);
730 if (steal_acc)
731 static_key_slow_inc(&paravirt_steal_rq_enabled);
732 }
733
734 return 0;
735}
736arch_initcall(activate_jump_labels);
737
738static __init int kvm_setup_pv_tlb_flush(void)
739{
740 int cpu;
741
742 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
743 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
744 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
745 for_each_possible_cpu(cpu) {
746 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
747 GFP_KERNEL, cpu_to_node(cpu));
748 }
749 pr_info("KVM setup pv remote TLB flush\n");
750 }
751
752 return 0;
753}
754arch_initcall(kvm_setup_pv_tlb_flush);
755
756#ifdef CONFIG_PARAVIRT_SPINLOCKS
757
758/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
759static void kvm_kick_cpu(int cpu)
760{
761 int apicid;
762 unsigned long flags = 0;
763
764 apicid = per_cpu(x86_cpu_to_apicid, cpu);
765 kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
766}
767
768#include <asm/qspinlock.h>
769
770static void kvm_wait(u8 *ptr, u8 val)
771{
772 unsigned long flags;
773
774 if (in_nmi())
775 return;
776
777 local_irq_save(flags);
778
779 if (READ_ONCE(*ptr) != val)
780 goto out;
781
782 /*
783 * halt until it's our turn and kicked. Note that we do safe halt
784 * for irq enabled case to avoid hang when lock info is overwritten
785 * in irq spinlock slowpath and no spurious interrupt occur to save us.
786 */
787 if (arch_irqs_disabled_flags(flags))
788 halt();
789 else
790 safe_halt();
791
792out:
793 local_irq_restore(flags);
794}
795
796#ifdef CONFIG_X86_32
797__visible bool __kvm_vcpu_is_preempted(long cpu)
798{
799 struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
800
801 return !!(src->preempted & KVM_VCPU_PREEMPTED);
802}
803PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
804
805#else
806
807#include <asm/asm-offsets.h>
808
809extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
810
811/*
812 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
813 * restoring to/from the stack.
814 */
815asm(
816".pushsection .text;"
817".global __raw_callee_save___kvm_vcpu_is_preempted;"
818".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
819"__raw_callee_save___kvm_vcpu_is_preempted:"
820"movq __per_cpu_offset(,%rdi,8), %rax;"
821"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
822"setne %al;"
823"ret;"
824".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
825".popsection");
826
827#endif
828
829/*
830 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
831 */
832void __init kvm_spinlock_init(void)
833{
834 /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
835 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
836 return;
837
838 if (kvm_para_has_hint(KVM_HINTS_REALTIME))
839 return;
840
841 /* Don't use the pvqspinlock code if there is only 1 vCPU. */
842 if (num_possible_cpus() == 1)
843 return;
844
845 __pv_init_lock_hash();
846 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
847 pv_ops.lock.queued_spin_unlock =
848 PV_CALLEE_SAVE(__pv_queued_spin_unlock);
849 pv_ops.lock.wait = kvm_wait;
850 pv_ops.lock.kick = kvm_kick_cpu;
851
852 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
853 pv_ops.lock.vcpu_is_preempted =
854 PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
855 }
856}
857
858#endif /* CONFIG_PARAVIRT_SPINLOCKS */
859
860#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
861
862static void kvm_disable_host_haltpoll(void *i)
863{
864 wrmsrl(MSR_KVM_POLL_CONTROL, 0);
865}
866
867static void kvm_enable_host_haltpoll(void *i)
868{
869 wrmsrl(MSR_KVM_POLL_CONTROL, 1);
870}
871
872void arch_haltpoll_enable(unsigned int cpu)
873{
874 if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
875 pr_err_once("kvm: host does not support poll control\n");
876 pr_err_once("kvm: host upgrade recommended\n");
877 return;
878 }
879
880 /* Enable guest halt poll disables host halt poll */
881 smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
882}
883EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
884
885void arch_haltpoll_disable(unsigned int cpu)
886{
887 if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
888 return;
889
890 /* Enable guest halt poll disables host halt poll */
891 smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
892}
893EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
894#endif
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * KVM paravirt_ops implementation
4 *
5 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 * Copyright IBM Corporation, 2007
7 * Authors: Anthony Liguori <aliguori@us.ibm.com>
8 */
9
10#define pr_fmt(fmt) "kvm-guest: " fmt
11
12#include <linux/context_tracking.h>
13#include <linux/init.h>
14#include <linux/irq.h>
15#include <linux/kernel.h>
16#include <linux/kvm_para.h>
17#include <linux/cpu.h>
18#include <linux/mm.h>
19#include <linux/highmem.h>
20#include <linux/hardirq.h>
21#include <linux/notifier.h>
22#include <linux/reboot.h>
23#include <linux/hash.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/kprobes.h>
27#include <linux/nmi.h>
28#include <linux/swait.h>
29#include <asm/timer.h>
30#include <asm/cpu.h>
31#include <asm/traps.h>
32#include <asm/desc.h>
33#include <asm/tlbflush.h>
34#include <asm/apic.h>
35#include <asm/apicdef.h>
36#include <asm/hypervisor.h>
37#include <asm/tlb.h>
38#include <asm/cpuidle_haltpoll.h>
39
40DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
41
42static int kvmapf = 1;
43
44static int __init parse_no_kvmapf(char *arg)
45{
46 kvmapf = 0;
47 return 0;
48}
49
50early_param("no-kvmapf", parse_no_kvmapf);
51
52static int steal_acc = 1;
53static int __init parse_no_stealacc(char *arg)
54{
55 steal_acc = 0;
56 return 0;
57}
58
59early_param("no-steal-acc", parse_no_stealacc);
60
61static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
62DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
63static int has_steal_clock = 0;
64
65/*
66 * No need for any "IO delay" on KVM
67 */
68static void kvm_io_delay(void)
69{
70}
71
72#define KVM_TASK_SLEEP_HASHBITS 8
73#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
74
75struct kvm_task_sleep_node {
76 struct hlist_node link;
77 struct swait_queue_head wq;
78 u32 token;
79 int cpu;
80};
81
82static struct kvm_task_sleep_head {
83 raw_spinlock_t lock;
84 struct hlist_head list;
85} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
86
87static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
88 u32 token)
89{
90 struct hlist_node *p;
91
92 hlist_for_each(p, &b->list) {
93 struct kvm_task_sleep_node *n =
94 hlist_entry(p, typeof(*n), link);
95 if (n->token == token)
96 return n;
97 }
98
99 return NULL;
100}
101
102static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
103{
104 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
105 struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
106 struct kvm_task_sleep_node *e;
107
108 raw_spin_lock(&b->lock);
109 e = _find_apf_task(b, token);
110 if (e) {
111 /* dummy entry exist -> wake up was delivered ahead of PF */
112 hlist_del(&e->link);
113 raw_spin_unlock(&b->lock);
114 kfree(e);
115 return false;
116 }
117
118 n->token = token;
119 n->cpu = smp_processor_id();
120 init_swait_queue_head(&n->wq);
121 hlist_add_head(&n->link, &b->list);
122 raw_spin_unlock(&b->lock);
123 return true;
124}
125
126/*
127 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
128 * @token: Token to identify the sleep node entry
129 *
130 * Invoked from the async pagefault handling code or from the VM exit page
131 * fault handler. In both cases RCU is watching.
132 */
133void kvm_async_pf_task_wait_schedule(u32 token)
134{
135 struct kvm_task_sleep_node n;
136 DECLARE_SWAITQUEUE(wait);
137
138 lockdep_assert_irqs_disabled();
139
140 if (!kvm_async_pf_queue_task(token, &n))
141 return;
142
143 for (;;) {
144 prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
145 if (hlist_unhashed(&n.link))
146 break;
147
148 local_irq_enable();
149 schedule();
150 local_irq_disable();
151 }
152 finish_swait(&n.wq, &wait);
153}
154EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);
155
156static void apf_task_wake_one(struct kvm_task_sleep_node *n)
157{
158 hlist_del_init(&n->link);
159 if (swq_has_sleeper(&n->wq))
160 swake_up_one(&n->wq);
161}
162
163static void apf_task_wake_all(void)
164{
165 int i;
166
167 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
168 struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
169 struct kvm_task_sleep_node *n;
170 struct hlist_node *p, *next;
171
172 raw_spin_lock(&b->lock);
173 hlist_for_each_safe(p, next, &b->list) {
174 n = hlist_entry(p, typeof(*n), link);
175 if (n->cpu == smp_processor_id())
176 apf_task_wake_one(n);
177 }
178 raw_spin_unlock(&b->lock);
179 }
180}
181
182void kvm_async_pf_task_wake(u32 token)
183{
184 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
185 struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
186 struct kvm_task_sleep_node *n;
187
188 if (token == ~0) {
189 apf_task_wake_all();
190 return;
191 }
192
193again:
194 raw_spin_lock(&b->lock);
195 n = _find_apf_task(b, token);
196 if (!n) {
197 /*
198 * async PF was not yet handled.
199 * Add dummy entry for the token.
200 */
201 n = kzalloc(sizeof(*n), GFP_ATOMIC);
202 if (!n) {
203 /*
204 * Allocation failed! Busy wait while other cpu
205 * handles async PF.
206 */
207 raw_spin_unlock(&b->lock);
208 cpu_relax();
209 goto again;
210 }
211 n->token = token;
212 n->cpu = smp_processor_id();
213 init_swait_queue_head(&n->wq);
214 hlist_add_head(&n->link, &b->list);
215 } else {
216 apf_task_wake_one(n);
217 }
218 raw_spin_unlock(&b->lock);
219 return;
220}
221EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
222
223noinstr u32 kvm_read_and_reset_apf_flags(void)
224{
225 u32 flags = 0;
226
227 if (__this_cpu_read(apf_reason.enabled)) {
228 flags = __this_cpu_read(apf_reason.flags);
229 __this_cpu_write(apf_reason.flags, 0);
230 }
231
232 return flags;
233}
234EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
235
236noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
237{
238 u32 flags = kvm_read_and_reset_apf_flags();
239 irqentry_state_t state;
240
241 if (!flags)
242 return false;
243
244 state = irqentry_enter(regs);
245 instrumentation_begin();
246
247 /*
248 * If the host managed to inject an async #PF into an interrupt
249 * disabled region, then die hard as this is not going to end well
250 * and the host side is seriously broken.
251 */
252 if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
253 panic("Host injected async #PF in interrupt disabled region\n");
254
255 if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
256 if (unlikely(!(user_mode(regs))))
257 panic("Host injected async #PF in kernel mode\n");
258 /* Page is swapped out by the host. */
259 kvm_async_pf_task_wait_schedule(token);
260 } else {
261 WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
262 }
263
264 instrumentation_end();
265 irqentry_exit(regs, state);
266 return true;
267}
268
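/*
 * "Page ready" notifications arrive here on HYPERVISOR_CALLBACK_VECTOR; wake
 * the task waiting on the token and ack via MSR_KVM_ASYNC_PF_ACK so the host
 * can deliver the next pending notification.
 */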
269DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
270{
271 struct pt_regs *old_regs = set_irq_regs(regs);
272 u32 token;
273
274 ack_APIC_irq();
275
276 inc_irq_stat(irq_hv_callback_count);
277
278 if (__this_cpu_read(apf_reason.enabled)) {
279 token = __this_cpu_read(apf_reason.token);
280 kvm_async_pf_task_wake(token);
281 __this_cpu_write(apf_reason.token, 0);
282 wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
283 }
284
285 set_irq_regs(old_regs);
286}
287
288static void __init paravirt_ops_setup(void)
289{
290 pv_info.name = "KVM";
291
292 if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
293 pv_ops.cpu.io_delay = kvm_io_delay;
294
295#ifdef CONFIG_X86_IO_APIC
296 no_timer_check = 1;
297#endif
298}
299
300static void kvm_register_steal_time(void)
301{
302 int cpu = smp_processor_id();
303 struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
304
305 if (!has_steal_clock)
306 return;
307
308 wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
309 pr_info("stealtime: cpu %d, msr %llx\n", cpu,
310 (unsigned long long) slow_virt_to_phys(st));
311}
312
313static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
314
315static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
316{
317 /**
318 * This relies on __test_and_clear_bit to modify the memory
319 * in a way that is atomic with respect to the local CPU.
320 * The hypervisor only accesses this memory from the local CPU so
321 * there's no need for lock or memory barriers.
322 * An optimization barrier is implied in apic write.
323 */
324 if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
325 return;
326 apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
327}
328
329static void kvm_guest_cpu_init(void)
330{
331 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
332 u64 pa;
333
334 WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
335
336 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
337 pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
338
339 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
340 pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
341
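		/*
		 * Tell the host which vector to raise for "page ready"
		 * notifications before async #PF delivery is enabled.
		 */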
342 wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
343
344 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
345 __this_cpu_write(apf_reason.enabled, 1);
346 pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
347 }
348
349 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
350 unsigned long pa;
351
352 /* Size alignment is implied but just to make it explicit. */
353 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
354 __this_cpu_write(kvm_apic_eoi, 0);
355 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
356 | KVM_MSR_ENABLED;
357 wrmsrl(MSR_KVM_PV_EOI_EN, pa);
358 }
359
360 if (has_steal_clock)
361 kvm_register_steal_time();
362}
363
364static void kvm_pv_disable_apf(void)
365{
366 if (!__this_cpu_read(apf_reason.enabled))
367 return;
368
369 wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
370 __this_cpu_write(apf_reason.enabled, 0);
371
372 pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
373}
374
375static void kvm_pv_guest_cpu_reboot(void *unused)
376{
377 /*
378 * We disable PV EOI before we load a new kernel by kexec,
379 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
380 * New kernel can re-enable when it boots.
381 */
382 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
383 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
384 kvm_pv_disable_apf();
385 kvm_disable_steal_time();
386}
387
388static int kvm_pv_reboot_notify(struct notifier_block *nb,
389 unsigned long code, void *unused)
390{
391 if (code == SYS_RESTART)
392 on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
393 return NOTIFY_DONE;
394}
395
396static struct notifier_block kvm_pv_reboot_nb = {
397 .notifier_call = kvm_pv_reboot_notify,
398};
399
400static u64 kvm_steal_clock(int cpu)
401{
402 u64 steal;
403 struct kvm_steal_time *src;
404 int version;
405
406 src = &per_cpu(steal_time, cpu);
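	/*
	 * The host bumps ->version before and after updating ->steal, so an
	 * odd value or a value that changed across the reads means an update
	 * was in flight; retry until a stable snapshot is observed.
	 */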
407 do {
408 version = src->version;
409 virt_rmb();
410 steal = src->steal;
411 virt_rmb();
412 } while ((version & 1) || (version != src->version));
413
414 return steal;
415}
416
417void kvm_disable_steal_time(void)
418{
419 if (!has_steal_clock)
420 return;
421
422 wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
423}
424
425static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
426{
427 early_set_memory_decrypted((unsigned long) ptr, size);
428}
429
430/*
431 * Iterate through all possible CPUs and map the memory region pointed
432 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
433 *
434 * Note: we iterate through all possible CPUs to ensure that CPUs
435 * hotplugged will have their per-cpu variable already mapped as
436 * decrypted.
437 */
438static void __init sev_map_percpu_data(void)
439{
440 int cpu;
441
442 if (!sev_active())
443 return;
444
445 for_each_possible_cpu(cpu) {
446 __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
447 __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
448 __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
449 }
450}
451
452static bool pv_tlb_flush_supported(void)
453{
454 return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
455 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
456 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
457}
458
459static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
460
461#ifdef CONFIG_SMP
462
463static bool pv_ipi_supported(void)
464{
465 return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
466}
467
468static bool pv_sched_yield_supported(void)
469{
470 return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
471 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
472 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
473}
474
475#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
476
477static void __send_ipi_mask(const struct cpumask *mask, int vector)
478{
479 unsigned long flags;
480 int cpu, apic_id, icr;
481 int min = 0, max = 0;
482#ifdef CONFIG_X86_64
483 __uint128_t ipi_bitmap = 0;
484#else
485 u64 ipi_bitmap = 0;
486#endif
487 long ret;
488
489 if (cpumask_empty(mask))
490 return;
491
492 local_irq_save(flags);
493
494 switch (vector) {
495 default:
496 icr = APIC_DM_FIXED | vector;
497 break;
498 case NMI_VECTOR:
499 icr = APIC_DM_NMI;
500 break;
501 }
502
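	/*
	 * Pack target APIC IDs into a bitmap anchored at 'min' (bit 0 == min).
	 * Whenever the next APIC ID would not fit into the 2*BITS_PER_LONG
	 * window, flush the accumulated bitmap with KVM_HC_SEND_IPI and start
	 * a new cluster.
	 */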
503 for_each_cpu(cpu, mask) {
504 apic_id = per_cpu(x86_cpu_to_apicid, cpu);
505 if (!ipi_bitmap) {
506 min = max = apic_id;
507 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
508 ipi_bitmap <<= min - apic_id;
509 min = apic_id;
510 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
511 max = apic_id < max ? max : apic_id;
512 } else {
513 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
514 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
515 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
516 ret);
517 min = max = apic_id;
518 ipi_bitmap = 0;
519 }
520 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
521 }
522
523 if (ipi_bitmap) {
524 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
525 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
526 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
527 ret);
528 }
529
530 local_irq_restore(flags);
531}
532
533static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
534{
535 __send_ipi_mask(mask, vector);
536}
537
538static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
539{
540 unsigned int this_cpu = smp_processor_id();
541 struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
542 const struct cpumask *local_mask;
543
544 cpumask_copy(new_mask, mask);
545 cpumask_clear_cpu(this_cpu, new_mask);
546 local_mask = new_mask;
547 __send_ipi_mask(local_mask, vector);
548}
549
550/*
551 * Set the IPI entry points
552 */
553static void kvm_setup_pv_ipi(void)
554{
555 apic->send_IPI_mask = kvm_send_ipi_mask;
556 apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
557 pr_info("setup PV IPIs\n");
558}
559
560static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
561{
562 int cpu;
563
564 native_send_call_func_ipi(mask);
565
566 /* Make sure other vCPUs get a chance to run if they need to. */
567 for_each_cpu(cpu, mask) {
568 if (vcpu_is_preempted(cpu)) {
569 kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
570 break;
571 }
572 }
573}
574
575static void __init kvm_smp_prepare_boot_cpu(void)
576{
577 /*
578 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
579 * shares the guest physical address with the hypervisor.
580 */
581 sev_map_percpu_data();
582
583 kvm_guest_cpu_init();
584 native_smp_prepare_boot_cpu();
585 kvm_spinlock_init();
586}
587
588static void kvm_guest_cpu_offline(void)
589{
590 kvm_disable_steal_time();
591 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
592 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
593 kvm_pv_disable_apf();
594 apf_task_wake_all();
595}
596
597static int kvm_cpu_online(unsigned int cpu)
598{
599 local_irq_disable();
600 kvm_guest_cpu_init();
601 local_irq_enable();
602 return 0;
603}
604
605static int kvm_cpu_down_prepare(unsigned int cpu)
606{
607 local_irq_disable();
608 kvm_guest_cpu_offline();
609 local_irq_enable();
610 return 0;
611}
612#endif
613
614static void kvm_flush_tlb_others(const struct cpumask *cpumask,
615 const struct flush_tlb_info *info)
616{
617 u8 state;
618 int cpu;
619 struct kvm_steal_time *src;
620 struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
621
622 cpumask_copy(flushmask, cpumask);
623 /*
624 * We have to call flush only on online vCPUs. And
625 * queue flush_on_enter for pre-empted vCPUs
626 */
627 for_each_cpu(cpu, flushmask) {
628 src = &per_cpu(steal_time, cpu);
629 state = READ_ONCE(src->preempted);
630 if ((state & KVM_VCPU_PREEMPTED)) {
631 if (try_cmpxchg(&src->preempted, &state,
632 state | KVM_VCPU_FLUSH_TLB))
633 __cpumask_clear_cpu(cpu, flushmask);
634 }
635 }
636
637 native_flush_tlb_others(flushmask, info);
638}
639
640static void __init kvm_guest_init(void)
641{
642 int i;
643
644 paravirt_ops_setup();
645 register_reboot_notifier(&kvm_pv_reboot_nb);
646 for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
647 raw_spin_lock_init(&async_pf_sleepers[i].lock);
648
649 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
650 has_steal_clock = 1;
651 pv_ops.time.steal_clock = kvm_steal_clock;
652 }
653
654 if (pv_tlb_flush_supported()) {
655 pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
656 pv_ops.mmu.tlb_remove_table = tlb_remove_table;
657 pr_info("KVM setup pv remote TLB flush\n");
658 }
659
660 if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
661 apic_set_eoi_write(kvm_guest_apic_eoi_write);
662
663 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
664 static_branch_enable(&kvm_async_pf_enabled);
665 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
666 }
667
668#ifdef CONFIG_SMP
669 smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
670 if (pv_sched_yield_supported()) {
671 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
672 pr_info("setup PV sched yield\n");
673 }
674 if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
675 kvm_cpu_online, kvm_cpu_down_prepare) < 0)
676 pr_err("failed to install cpu hotplug callbacks\n");
677#else
678 sev_map_percpu_data();
679 kvm_guest_cpu_init();
680#endif
681
682 /*
683 * Hard lockup detection is enabled by default. Disable it, as guests
684 * can get false positives too easily, for example if the host is
685 * overcommitted.
686 */
687 hardlockup_detector_disable();
688}
689
690static noinline uint32_t __kvm_cpuid_base(void)
691{
692 if (boot_cpu_data.cpuid_level < 0)
693 return 0; /* So we don't blow up on old processors */
694
695 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
696 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
697
698 return 0;
699}
700
701static inline uint32_t kvm_cpuid_base(void)
702{
703 static int kvm_cpuid_base = -1;
704
705 if (kvm_cpuid_base == -1)
706 kvm_cpuid_base = __kvm_cpuid_base();
707
708 return kvm_cpuid_base;
709}
710
711bool kvm_para_available(void)
712{
713 return kvm_cpuid_base() != 0;
714}
715EXPORT_SYMBOL_GPL(kvm_para_available);
716
717unsigned int kvm_arch_para_features(void)
718{
719 return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
720}
721
722unsigned int kvm_arch_para_hints(void)
723{
724 return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
725}
726EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
727
728static uint32_t __init kvm_detect(void)
729{
730 return kvm_cpuid_base();
731}
732
733static void __init kvm_apic_init(void)
734{
735#if defined(CONFIG_SMP)
736 if (pv_ipi_supported())
737 kvm_setup_pv_ipi();
738#endif
739}
740
741static void __init kvm_init_platform(void)
742{
743 kvmclock_init();
744 x86_platform.apic_post_init = kvm_apic_init;
745}
746
747const __initconst struct hypervisor_x86 x86_hyper_kvm = {
748 .name = "KVM",
749 .detect = kvm_detect,
750 .type = X86_HYPER_KVM,
751 .init.guest_late_init = kvm_guest_init,
752 .init.x2apic_available = kvm_para_available,
753 .init.init_platform = kvm_init_platform,
754};
755
756static __init int activate_jump_labels(void)
757{
758 if (has_steal_clock) {
759 static_key_slow_inc(&paravirt_steal_enabled);
760 if (steal_acc)
761 static_key_slow_inc(&paravirt_steal_rq_enabled);
762 }
763
764 return 0;
765}
766arch_initcall(activate_jump_labels);
767
768static __init int kvm_alloc_cpumask(void)
769{
770 int cpu;
771 bool alloc = false;
772
773 if (!kvm_para_available() || nopv)
774 return 0;
775
776 if (pv_tlb_flush_supported())
777 alloc = true;
778
779#if defined(CONFIG_SMP)
780 if (pv_ipi_supported())
781 alloc = true;
782#endif
783
784 if (alloc)
785 for_each_possible_cpu(cpu) {
786 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
787 GFP_KERNEL, cpu_to_node(cpu));
788 }
789
790 return 0;
791}
792arch_initcall(kvm_alloc_cpumask);
793
794#ifdef CONFIG_PARAVIRT_SPINLOCKS
795
796/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
797static void kvm_kick_cpu(int cpu)
798{
799 int apicid;
800 unsigned long flags = 0;
801
802 apicid = per_cpu(x86_cpu_to_apicid, cpu);
803 kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
804}
805
806#include <asm/qspinlock.h>
807
808static void kvm_wait(u8 *ptr, u8 val)
809{
810 unsigned long flags;
811
812 if (in_nmi())
813 return;
814
815 local_irq_save(flags);
816
817 if (READ_ONCE(*ptr) != val)
818 goto out;
819
820 /*
821 * halt until it's our turn and kicked. Note that we do safe halt
822 * for irq enabled case to avoid hang when lock info is overwritten
823 * in irq spinlock slowpath and no spurious interrupt occur to save us.
824 */
825 if (arch_irqs_disabled_flags(flags))
826 halt();
827 else
828 safe_halt();
829
830out:
831 local_irq_restore(flags);
832}
833
834#ifdef CONFIG_X86_32
835__visible bool __kvm_vcpu_is_preempted(long cpu)
836{
837 struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
838
839 return !!(src->preempted & KVM_VCPU_PREEMPTED);
840}
841PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
842
843#else
844
845#include <asm/asm-offsets.h>
846
847extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
848
849/*
850 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
851 * restoring to/from the stack.
852 */
853asm(
854".pushsection .text;"
855".global __raw_callee_save___kvm_vcpu_is_preempted;"
856".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
857"__raw_callee_save___kvm_vcpu_is_preempted:"
858"movq __per_cpu_offset(,%rdi,8), %rax;"
859"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
860"setne %al;"
861"ret;"
862".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
863".popsection");
864
865#endif
866
867/*
868 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
869 */
870void __init kvm_spinlock_init(void)
871{
872 /*
873 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
874 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
875 * preferred over native qspinlock when vCPU is preempted.
876 */
877 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
878 pr_info("PV spinlocks disabled, no host support\n");
879 return;
880 }
881
882 /*
883 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
884 * are available.
885 */
886 if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
887 pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
888 goto out;
889 }
890
891 if (num_possible_cpus() == 1) {
892 pr_info("PV spinlocks disabled, single CPU\n");
893 goto out;
894 }
895
896 if (nopvspin) {
897 pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
898 goto out;
899 }
900
901 pr_info("PV spinlocks enabled\n");
902
903 __pv_init_lock_hash();
904 pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
905 pv_ops.lock.queued_spin_unlock =
906 PV_CALLEE_SAVE(__pv_queued_spin_unlock);
907 pv_ops.lock.wait = kvm_wait;
908 pv_ops.lock.kick = kvm_kick_cpu;
909
910 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
911 pv_ops.lock.vcpu_is_preempted =
912 PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
913 }
914 /*
915 * When PV spinlock is enabled which is preferred over
916 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
917 * Just disable it anyway.
918 */
919out:
920 static_branch_disable(&virt_spin_lock_key);
921}
922
923#endif /* CONFIG_PARAVIRT_SPINLOCKS */
924
925#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
926
927static void kvm_disable_host_haltpoll(void *i)
928{
929 wrmsrl(MSR_KVM_POLL_CONTROL, 0);
930}
931
932static void kvm_enable_host_haltpoll(void *i)
933{
934 wrmsrl(MSR_KVM_POLL_CONTROL, 1);
935}
936
937void arch_haltpoll_enable(unsigned int cpu)
938{
939 if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
940 pr_err_once("host does not support poll control\n");
941 pr_err_once("host upgrade recommended\n");
942 return;
943 }
944
945 /* Enable guest halt poll disables host halt poll */
946 smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
947}
948EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
949
950void arch_haltpoll_disable(unsigned int cpu)
951{
952 if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
953 return;
954
955 /* Enable guest halt poll disables host halt poll */
956 smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
957}
958EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
959#endif