/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int __init parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 * (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exist -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) ||
                   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
                    ? preempt_count() > 1 || rcu_preempt_depth()
                    : interrupt_kernel);
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                rcu_irq_exit();

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }

                rcu_irq_enter();
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

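/*
 * Wake up a single task waiting on an async PF token: unhash its node and
 * either reschedule the (halted) CPU or wake the sleeping waiter.  Called
 * with the bucket lock held.
 */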
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swq_has_sleeper(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

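/*
 * Handle a "page ready" notification for @token.  If no task is waiting yet
 * (the wakeup raced ahead of the page fault), leave a dummy node behind so
 * kvm_async_pf_task_wait() can detect the completed wakeup.  A token of ~0
 * means "wake everything waiting on this CPU".
 */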
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

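/*
 * Read the pending async PF reason from the per-CPU apf_reason area shared
 * with the hypervisor and clear it.  Returns 0 when async PF is not enabled
 * on this CPU or no reason is pending.
 */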
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

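/*
 * #PF entry point when async PF is enabled.  Regular faults are forwarded to
 * do_page_fault(); the two paravirtual reasons either put the faulting task
 * to sleep until the page is ready or wake the task that was waiting for it.
 */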
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

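/*
 * Tell the hypervisor where this CPU's steal-time record lives by writing
 * its physical address (with the enable bit set) to MSR_KVM_STEAL_TIME.
 */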
static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /**
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

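/*
 * Per-CPU guest setup: register the async PF shared area, the PV EOI word
 * and the steal-time record with the hypervisor for the current CPU.
 */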
static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                pa |= KVM_ASYNC_PF_ENABLED;

                if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
                        pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

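/*
 * Unregister the async PF shared area for this CPU by clearing
 * MSR_KVM_ASYNC_PF_EN.
 */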
static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

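/*
 * Read the accumulated steal time for @cpu from the record shared with the
 * hypervisor.  The version field works like a seqcount: it is odd while the
 * host is updating the record, so retry until a stable, even value is seen.
 */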
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
        early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
        int cpu;

        if (!sev_active())
                return;

        for_each_possible_cpu(cpu) {
                __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
                __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
                __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
        }
}

#ifdef CONFIG_SMP
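/*
 * KVM_HINTS_REALTIME indicates that vCPUs are not expected to be preempted
 * by the host, so the virt_spin_lock_key shortcut is unnecessary and gets
 * disabled here.
 */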
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
        /*
         * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
         * shares the guest physical address with the hypervisor.
         */
        sev_map_percpu_data();

        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

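/*
 * Tear down the per-CPU paravirt state (steal time, PV EOI, async PF) when a
 * CPU goes offline, and release any tasks still waiting on async PF tokens.
 */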
static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif

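/*
 * Route the page-fault vector to the async-PF-aware entry point
 * (async_page_fault) instead of the regular page fault handler.
 */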
static void __init kvm_apf_trap_init(void)
{
        update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

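/*
 * Paravirtual remote TLB flush: instead of sending an IPI to a vCPU that the
 * host has preempted, set KVM_VCPU_FLUSH_TLB in its steal-time record so the
 * hypervisor flushes its TLB when the vCPU next runs.  Only the vCPUs that
 * are currently running are flushed via native_flush_tlb_others().
 */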
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
{
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

        cpumask_copy(flushmask, cpumask);
        /*
         * We have to call flush only on online vCPUs. And
         * queue flush_on_enter for pre-empted vCPUs
         */
        for_each_cpu(cpu, flushmask) {
                src = &per_cpu(steal_time, cpu);
                state = READ_ONCE(src->preempted);
                if ((state & KVM_VCPU_PREEMPTED)) {
                        if (try_cmpxchg(&src->preempted, &state,
                                        state | KVM_VCPU_FLUSH_TLB))
                                __cpumask_clear_cpu(cpu, flushmask);
                }
        }

        native_flush_tlb_others(flushmask, info);
}

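/*
 * Main guest-side initialization: install the paravirt hooks, register the
 * reboot notifier, and enable the optional features (async PF, steal time,
 * PV TLB flush, PV EOI, kvmclock vsyscall) that the host advertises.
 */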
static void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        sev_map_percpu_data();
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

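/*
 * Locate the KVM CPUID leaf range by scanning the hypervisor CPUID space for
 * the "KVMKVMKVM" signature.  A return value of 0 means KVM was not detected.
 */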
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
        return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .type                   = X86_HYPER_KVM,
        .init.guest_late_init   = kvm_guest_init,
        .init.x2apic_available  = kvm_para_available,
};

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

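/*
 * Allocate the per-CPU scratch cpumask used by kvm_flush_tlb_others().  Only
 * needed when the PV TLB flush path is actually going to be used.
 */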
static __init int kvm_setup_pv_tlb_flush(void)
{
        int cpu;

        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
                }
                pr_info("KVM setup pv remote TLB flush\n");
        }

        return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

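/*
 * pv qspinlock wait callback: halt this vCPU until it is kicked (or an
 * interrupt arrives), provided the lock byte still holds the expected value.
 */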
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * halt until it's our turn and kicked. Note that we do safe halt
         * for irq enabled case to avoid hang when lock info is overwritten
         * in irq spinlock slowpath and no spurious interrupt occur to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

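/*
 * __kvm_vcpu_is_preempted() reports whether the host has preempted @cpu by
 * checking the KVM_VCPU_PREEMPTED flag in that CPU's steal-time record.  The
 * 64-bit build uses a hand-written assembly thunk to avoid the register
 * save/restore of the generic callee-save wrapper.
 */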
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq __per_cpu_offset(,%rdi,8), %rax;"
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_lock_ops.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */