v4.6
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
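
/*
 * Async page fault tokens are rendezvoused through the buckets above:
 * hash_32(token, KVM_TASK_SLEEP_HASHBITS) picks one of the 256 buckets,
 * and the wait/wake pair must cope with the "wake arrives first" ordering
 * by leaving a dummy node behind (see kvm_async_pf_task_wake() below).
 * A self-contained userspace model of this rendezvous follows the end of
 * this listing.
 */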

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swait_active(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

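/*
 * #PF entry point when async PF is enabled: the host stores a reason
 * code in the per-CPU apf_reason area before injecting the fault, and
 * for the async cases passes the token in CR2 instead of a fault
 * address. A reason of 0 means an ordinary page fault.
 */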
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                trace_do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        /*
         * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
         * guest kernel works like a bare metal kernel with additional
         * features, and paravirt_enabled is about features that are
         * missing.
         */
        pv_info.paravirt_enabled = 0;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

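/*
 * kvm_apic_eoi is shared with the host via MSR_KVM_PV_EOI_EN (registered
 * in kvm_guest_cpu_init() below). When the host sets KVM_PV_EOI_BIT while
 * injecting an interrupt, the guest can acknowledge it simply by clearing
 * the bit; only if the bit was not set do we fall back to a real APIC
 * EOI register write.
 */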
static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
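                /*
                 * apf_reason is __aligned(64), so the low bits of its
                 * physical address are free to carry the enable flags
                 * that are ORed in here.
                 */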
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

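/*
 * steal_time is updated by the host under a seqlock-like protocol: the
 * version is bumped to an odd value before the record is rewritten and
 * to an even value afterwards, so a reader that sees an odd or a changed
 * version must retry. A runnable userspace sketch of this read side
 * follows at the end of the document.
 */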
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call  = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (cpu_has_hypervisor)
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
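
/*
 * kvm_kick_cpu() is the wake side of the PV spinlock protocol: the
 * KVM_HC_KICK_CPU hypercall asks the host to unhalt the target vCPU,
 * pairing with the halt()/safe_halt() in kvm_wait() and
 * kvm_lock_spinning() below.
 */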

#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it's our turn and we get kicked. Note that we do a
         * safe halt in the irq-enabled case, to avoid hanging if the lock
         * info is overwritten in the irq spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
        RELEASED_SLOW,
        RELEASED_SLOW_KICKED,
        NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS   30

static struct kvm_spinlock_stats
{
        u32 contention_stats[NR_CONTENTION_STATS];
        u32 histo_spin_blocked[HISTO_BUCKETS+1];
        u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
        u8 ret;
        u8 old;

        old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
                if (ret == old)
                        memset(&spinlock_stats, 0, sizeof(spinlock_stats));
        }
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
        check_zero();
        spinlock_stats.contention_stats[var] += val;
}


static inline u64 spin_time_start(void)
{
        return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
        unsigned index;

        index = ilog2(delta);
        check_zero();

        if (index < HISTO_BUCKETS)
                array[index]++;
        else
                array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
        u32 delta;

        delta = sched_clock() - start;
        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
        spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

static struct dentry *kvm_init_debugfs(void)
{
        d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
        if (!d_kvm_debug)
                printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

        return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
        struct dentry *d_kvm;

        d_kvm = kvm_init_debugfs();
        if (d_kvm == NULL)
                return -ENOMEM;

        d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[TAKEN_SLOW]);
        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

        debugfs_create_u32("released_slow", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[RELEASED_SLOW]);
        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
                           &spinlock_stats.time_blocked);

        debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

        return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
        return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
        struct arch_spinlock *lock;
        __ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
        struct kvm_lock_waiting *w;
        int cpu;
        u64 start;
        unsigned long flags;
        __ticket_t head;

        if (in_nmi())
                return;

        w = this_cpu_ptr(&klock_waiting);
        cpu = smp_processor_id();
        start = spin_time_start();

        /*
         * Make sure an interrupt handler can't upset things in a
         * partially setup state.
         */
        local_irq_save(flags);

        /*
         * The ordering protocol on this is that the "lock" pointer
         * may only be set non-NULL if the "want" ticket is correct.
         * If we're updating "want", we must first clear "lock".
         */
        w->lock = NULL;
        smp_wmb();
        w->want = want;
        smp_wmb();
        w->lock = lock;

        add_stats(TAKEN_SLOW, 1);

        /*
         * This uses set_bit, which is atomic, but we should not rely on its
         * reordering guarantees. So a barrier is needed after this call.
         */
        cpumask_set_cpu(cpu, &waiting_cpus);

        barrier();

        /*
         * Mark entry to slowpath before doing the pickup test to make
         * sure we don't deadlock with an unlocker.
         */
        __ticket_enter_slowpath(lock);

        /* make sure enter_slowpath, which is atomic, does not cross the read */
        smp_mb__after_atomic();

        /*
         * Check again to make sure it didn't become free while
         * we weren't looking.
         */
        head = READ_ONCE(lock->tickets.head);
        if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }

        /*
         * Halt until it's our turn and we get kicked. Note that we do a
         * safe halt in the irq-enabled case, to avoid hanging if the lock
         * info is overwritten in the irq spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        cpumask_clear_cpu(cpu, &waiting_cpus);
        w->lock = NULL;
        local_irq_restore(flags);
        spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
        int cpu;

        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
                if (READ_ONCE(w->lock) == lock &&
                    READ_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
                }
        }
}

#endif /* !CONFIG_QUEUED_SPINLOCKS */

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

#ifdef CONFIG_QUEUED_SPINLOCKS
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
#else /* !CONFIG_QUEUED_SPINLOCKS */
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
        pv_lock_ops.unlock_kick = kvm_unlock_kick;
#endif
}
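
/*
 * Either backend has the same shape: a waiter publishes what it is
 * waiting for and halts, and the releaser finds it and kicks it awake.
 * With CONFIG_QUEUED_SPINLOCKS the generic pv qspinlock code drives
 * kvm_wait()/kvm_kick_cpu(); otherwise the ticketlock slowpath above
 * is used.
 */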

static __init int kvm_spinlock_init_jump(void)
{
        if (!kvm_para_available())
                return 0;
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return 0;

        static_key_slow_inc(&paravirt_ticketlocks_enabled);
        printk(KERN_INFO "KVM setup paravirtual spinlock\n");

        return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */
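
The wait/wake rendezvous in kvm_async_pf_task_wait()/kvm_async_pf_task_wake() can be modelled outside the kernel. The sketch below is a hypothetical, self-contained userspace analogue using pthreads: the names (apf_wait, apf_wake, bucket_of), the golden-ratio hash constant, and the data layout are illustrative inventions, not kernel API, and error handling (e.g. a failed calloc) is elided. It demonstrates the one property the kernel code is built around: a wake that arrives before the corresponding wait leaves a dummy node behind, which the waiter later consumes instead of sleeping. As in the kernel, each token is assumed to identify at most one outstanding wait.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define HASHBITS 8
#define HASHSIZE (1u << HASHBITS)

struct node {
        struct node *next;
        uint32_t token;
        bool done;                      /* wake already delivered */
        pthread_cond_t cv;
};

static struct bucket {
        pthread_mutex_t lock;           /* zero-init == unlocked on glibc */
        struct node *head;
} buckets[HASHSIZE];

/* Multiplicative hash in the spirit of the kernel's hash_32(); the
 * constant is illustrative and differs across kernel versions. */
static unsigned bucket_of(uint32_t token)
{
        return (token * 0x61C88647u) >> (32 - HASHBITS);
}

static struct node *find(struct bucket *b, uint32_t token)
{
        struct node *n;

        for (n = b->head; n; n = n->next)
                if (n->token == token)
                        return n;
        return NULL;
}

static void unlink_node(struct bucket *b, struct node *n)
{
        struct node **pp;

        for (pp = &b->head; *pp != n; pp = &(*pp)->next)
                ;
        *pp = n->next;
}

void apf_wait(uint32_t token)
{
        struct bucket *b = &buckets[bucket_of(token)];
        struct node self = { .token = token }, *e;

        pthread_mutex_lock(&b->lock);
        e = find(b, token);
        if (e) {                        /* wake was delivered ahead of us */
                unlink_node(b, e);
                free(e);
                pthread_mutex_unlock(&b->lock);
                return;
        }
        pthread_cond_init(&self.cv, NULL);
        self.next = b->head;            /* publish ourselves, then sleep */
        b->head = &self;
        while (!self.done)
                pthread_cond_wait(&self.cv, &b->lock);
        unlink_node(b, &self);
        pthread_mutex_unlock(&b->lock);
        pthread_cond_destroy(&self.cv);
}

void apf_wake(uint32_t token)
{
        struct bucket *b = &buckets[bucket_of(token)];
        struct node *n;

        pthread_mutex_lock(&b->lock);
        n = find(b, token);
        if (n) {                        /* waiter is asleep: wake it */
                n->done = true;
                pthread_cond_signal(&n->cv);
        } else {                        /* no waiter yet: leave a dummy */
                n = calloc(1, sizeof(*n));
                n->token = token;
                n->done = true;
                n->next = b->head;
                b->head = n;
        }
        pthread_mutex_unlock(&b->lock);
}
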
v3.15
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);

        rcu_irq_enter();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";
        pv_info.paravirt_enabled = 1;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}

void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __get_cpu_var(kvm_apic_eoi) = 0;
                pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call  = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (cpu_has_hypervisor)
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
        RELEASED_SLOW,
        RELEASED_SLOW_KICKED,
        NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS   30

static struct kvm_spinlock_stats
{
        u32 contention_stats[NR_CONTENTION_STATS];
        u32 histo_spin_blocked[HISTO_BUCKETS+1];
        u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
        u8 ret;
        u8 old;

        old = ACCESS_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
                if (ret == old)
                        memset(&spinlock_stats, 0, sizeof(spinlock_stats));
        }
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
        check_zero();
        spinlock_stats.contention_stats[var] += val;
}


static inline u64 spin_time_start(void)
{
        return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
        unsigned index;

        index = ilog2(delta);
        check_zero();

        if (index < HISTO_BUCKETS)
                array[index]++;
        else
                array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
        u32 delta;

        delta = sched_clock() - start;
        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
        spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
        d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
        if (!d_kvm_debug)
                printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

        return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
        struct dentry *d_kvm;

        d_kvm = kvm_init_debugfs();
        if (d_kvm == NULL)
                return -ENOMEM;

        d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[TAKEN_SLOW]);
        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

        debugfs_create_u32("released_slow", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[RELEASED_SLOW]);
        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
                   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
                           &spinlock_stats.time_blocked);

        debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

        return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
        return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
        struct arch_spinlock *lock;
        __ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
        struct kvm_lock_waiting *w;
        int cpu;
        u64 start;
        unsigned long flags;

        if (in_nmi())
                return;

        w = &__get_cpu_var(klock_waiting);
        cpu = smp_processor_id();
        start = spin_time_start();

        /*
         * Make sure an interrupt handler can't upset things in a
         * partially setup state.
         */
        local_irq_save(flags);

        /*
         * The ordering protocol on this is that the "lock" pointer
         * may only be set non-NULL if the "want" ticket is correct.
         * If we're updating "want", we must first clear "lock".
         */
        w->lock = NULL;
        smp_wmb();
        w->want = want;
        smp_wmb();
        w->lock = lock;

        add_stats(TAKEN_SLOW, 1);

        /*
         * This uses set_bit, which is atomic, but we should not rely on its
         * reordering guarantees. So a barrier is needed after this call.
         */
        cpumask_set_cpu(cpu, &waiting_cpus);

        barrier();

        /*
         * Mark entry to slowpath before doing the pickup test to make
         * sure we don't deadlock with an unlocker.
         */
        __ticket_enter_slowpath(lock);

        /*
         * Check again to make sure it didn't become free while
         * we weren't looking.
         */
        if (ACCESS_ONCE(lock->tickets.head) == want) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }

        /*
         * Halt until it's our turn and we get kicked. Note that we do a
         * safe halt in the irq-enabled case, to avoid hanging if the lock
         * info is overwritten in the irq spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        cpumask_clear_cpu(cpu, &waiting_cpus);
        w->lock = NULL;
        local_irq_restore(flags);
        spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
        int cpu;

        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
                if (ACCESS_ONCE(w->lock) == lock &&
                    ACCESS_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
                }
        }
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
        pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
        if (!kvm_para_available())
                return 0;
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return 0;

        static_key_slow_inc(&paravirt_ticketlocks_enabled);
        printk(KERN_INFO "KVM setup paravirtual spinlock\n");

        return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */
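
kvm_steal_clock() in both versions reads a record the host may be rewriting concurrently, using the even/odd version retry noted above. The following hypothetical userspace sketch demonstrates the same protocol with C11 atomics; the writer/reader pair, the struct, and all names are illustrative inventions, not kernel or KVM API, and a single writer is assumed, as with the host updating a given vCPU's steal_time record.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative only: a host/guest pair modelled as writer and reader. */
struct steal_time_model {
        atomic_uint version;            /* odd while an update is in flight */
        _Atomic uint64_t steal;         /* payload */
};

/* Writer (the host's role): odd version -> write payload -> even version. */
void steal_write(struct steal_time_model *st, uint64_t steal)
{
        unsigned v = atomic_load_explicit(&st->version, memory_order_relaxed);

        atomic_store_explicit(&st->version, v + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release); /* odd version visible first */
        atomic_store_explicit(&st->steal, steal, memory_order_relaxed);
        atomic_store_explicit(&st->version, v + 2, memory_order_release);
}

/* Reader (kvm_steal_clock()'s role): retry on an odd or changed version. */
uint64_t steal_read(struct steal_time_model *st)
{
        unsigned v1, v2;
        uint64_t steal;

        do {
                v1 = atomic_load_explicit(&st->version, memory_order_acquire);
                steal = atomic_load_explicit(&st->steal, memory_order_relaxed);
                /* the fence stands in for the kernel's rmb() pair */
                atomic_thread_fence(memory_order_acquire);
                v2 = atomic_load_explicit(&st->version, memory_order_relaxed);
        } while ((v1 & 1) || (v1 != v2));

        return steal;
}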