v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * KVM paravirt_ops implementation
  4 *
  5 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  6 * Copyright IBM Corporation, 2007
  7 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
  8 */
  9
 10#include <linux/context_tracking.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/kvm_para.h>
 14#include <linux/cpu.h>
 15#include <linux/mm.h>
 16#include <linux/highmem.h>
 17#include <linux/hardirq.h>
 18#include <linux/notifier.h>
 19#include <linux/reboot.h>
 20#include <linux/hash.h>
 21#include <linux/sched.h>
 22#include <linux/slab.h>
 23#include <linux/kprobes.h>
 24#include <linux/debugfs.h>
 25#include <linux/nmi.h>
 26#include <linux/swait.h>
 27#include <asm/timer.h>
 28#include <asm/cpu.h>
 29#include <asm/traps.h>
 30#include <asm/desc.h>
 31#include <asm/tlbflush.h>
 32#include <asm/apic.h>
 33#include <asm/apicdef.h>
 34#include <asm/hypervisor.h>
 35#include <asm/tlb.h>
 36
 37static int kvmapf = 1;
 38
 39static int __init parse_no_kvmapf(char *arg)
 40{
 41        kvmapf = 0;
 42        return 0;
 43}
 44
 45early_param("no-kvmapf", parse_no_kvmapf);
 46
 47static int steal_acc = 1;
 48static int __init parse_no_stealacc(char *arg)
 49{
 50        steal_acc = 0;
 51        return 0;
 52}
 53
 54early_param("no-steal-acc", parse_no_stealacc);
 55
 56static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 57DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
 58static int has_steal_clock = 0;
 59
 60/*
 61 * No need for any "IO delay" on KVM
 62 */
 63static void kvm_io_delay(void)
 64{
 65}
 66
 67#define KVM_TASK_SLEEP_HASHBITS 8
 68#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
 69
 70struct kvm_task_sleep_node {
 71	struct hlist_node link;
 72	struct swait_queue_head wq;
 73	u32 token;
 74	int cpu;
 75	bool halted;
 76};
 77
 78static struct kvm_task_sleep_head {
 79	raw_spinlock_t lock;
 80	struct hlist_head list;
 81} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
 82
 83static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
 84						  u32 token)
 85{
 86	struct hlist_node *p;
 87
 88	hlist_for_each(p, &b->list) {
 89		struct kvm_task_sleep_node *n =
 90			hlist_entry(p, typeof(*n), link);
 91		if (n->token == token)
 92			return n;
 93	}
 94
 95	return NULL;
 96}
 97
 98/*
 99 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
100 * 		      (other than user space)?
101 */
102void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
103{
104	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
105	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
106	struct kvm_task_sleep_node n, *e;
107	DECLARE_SWAITQUEUE(wait);
108
109	rcu_irq_enter();
110
111	raw_spin_lock(&b->lock);
112	e = _find_apf_task(b, token);
113	if (e) {
114		/* dummy entry exists -> wake up was delivered ahead of PF */
115		hlist_del(&e->link);
116		kfree(e);
117		raw_spin_unlock(&b->lock);
118
119		rcu_irq_exit();
120		return;
121	}
122
123	n.token = token;
124	n.cpu = smp_processor_id();
125	n.halted = is_idle_task(current) ||
126		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
127		    ? preempt_count() > 1 || rcu_preempt_depth()
128		    : interrupt_kernel);
129	init_swait_queue_head(&n.wq);
130	hlist_add_head(&n.link, &b->list);
131	raw_spin_unlock(&b->lock);
132
133	for (;;) {
134		if (!n.halted)
135			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
136		if (hlist_unhashed(&n.link))
137			break;
138
139		rcu_irq_exit();
140
141		if (!n.halted) {
142			local_irq_enable();
143			schedule();
144			local_irq_disable();
145		} else {
146			/*
147			 * We cannot reschedule. So halt.
148			 */
149			native_safe_halt();
150			local_irq_disable();
151		}
152
153		rcu_irq_enter();
154	}
155	if (!n.halted)
156		finish_swait(&n.wq, &wait);
157
158	rcu_irq_exit();
159	return;
160}
161EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
162
163static void apf_task_wake_one(struct kvm_task_sleep_node *n)
164{
165	hlist_del_init(&n->link);
166	if (n->halted)
167		smp_send_reschedule(n->cpu);
168	else if (swq_has_sleeper(&n->wq))
169		swake_up_one(&n->wq);
170}
171
172static void apf_task_wake_all(void)
173{
174	int i;
175
176	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
177		struct hlist_node *p, *next;
178		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
179		raw_spin_lock(&b->lock);
180		hlist_for_each_safe(p, next, &b->list) {
181			struct kvm_task_sleep_node *n =
182				hlist_entry(p, typeof(*n), link);
183			if (n->cpu == smp_processor_id())
184				apf_task_wake_one(n);
185		}
186		raw_spin_unlock(&b->lock);
187	}
188}
189
190void kvm_async_pf_task_wake(u32 token)
191{
192	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
193	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
194	struct kvm_task_sleep_node *n;
195
196	if (token == ~0) {
197		apf_task_wake_all();
198		return;
199	}
200
201again:
202	raw_spin_lock(&b->lock);
203	n = _find_apf_task(b, token);
204	if (!n) {
205		/*
206		 * async PF was not yet handled.
207		 * Add dummy entry for the token.
208		 */
209		n = kzalloc(sizeof(*n), GFP_ATOMIC);
210		if (!n) {
211			/*
212			 * Allocation failed! Busy wait while other cpu
213			 * handles async PF.
214			 */
215			raw_spin_unlock(&b->lock);
216			cpu_relax();
217			goto again;
218		}
219		n->token = token;
220		n->cpu = smp_processor_id();
221		init_swait_queue_head(&n->wq);
222		hlist_add_head(&n->link, &b->list);
223	} else
224		apf_task_wake_one(n);
225	raw_spin_unlock(&b->lock);
226	return;
227}
228EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
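
To make the wait/wake handshake above easier to follow, here is a minimal, single-threaded user-space model of the token-hashed "dummy entry" protocol: kvm_async_pf_task_wake() either releases a registered sleeper or leaves a dummy node behind, and kvm_async_pf_task_wait() consumes a pre-existing dummy instead of sleeping. This is illustrative only; the model_* names are invented for this sketch, and the real code adds per-bucket raw spinlocks, swait queues and the halted/schedule split.

/* Illustrative user-space model of the async-PF dummy-entry handoff.
 * Not part of kvm.c; all model_* names are invented for this sketch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_HASHSIZE 256

struct model_node {
	struct model_node *next;
	uint32_t token;
};

/* One bucket per hash value, mirroring async_pf_sleepers[]. */
static struct model_node *dummies[MODEL_HASHSIZE];

static unsigned int model_hash(uint32_t token)
{
	return (token * 2654435761u) % MODEL_HASHSIZE;	/* stand-in for hash_32() */
}

/* PAGE_READY arrived with no waiter registered: record a dummy entry. */
static void model_wake(uint32_t token)
{
	struct model_node *n = calloc(1, sizeof(*n));

	n->token = token;
	n->next = dummies[model_hash(token)];
	dummies[model_hash(token)] = n;
}

/*
 * PAGE_NOT_PRESENT: if a dummy for our token exists, the wake ran first
 * and the task does not need to sleep at all.
 */
static bool model_wait_would_sleep(uint32_t token)
{
	struct model_node **p = &dummies[model_hash(token)];

	for (; *p; p = &(*p)->next) {
		if ((*p)->token == token) {
			struct model_node *victim = *p;

			*p = victim->next;
			free(victim);
			return false;
		}
	}
	return true;
}

int main(void)
{
	model_wake(42);		/* wake delivered ahead of the fault */
	printf("token 42 sleeps: %d\n", model_wait_would_sleep(42));	/* 0 */
	printf("token  7 sleeps: %d\n", model_wait_would_sleep(7));	/* 1 */
	return 0;
}
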
229
230u32 kvm_read_and_reset_pf_reason(void)
231{
232	u32 reason = 0;
233
234	if (__this_cpu_read(apf_reason.enabled)) {
235		reason = __this_cpu_read(apf_reason.reason);
236		__this_cpu_write(apf_reason.reason, 0);
237	}
238
239	return reason;
240}
241EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
242NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
243
244dotraplinkage void
245do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
246{
247	enum ctx_state prev_state;
248
249	switch (kvm_read_and_reset_pf_reason()) {
250	default:
251		do_page_fault(regs, error_code, address);
252		break;
253	case KVM_PV_REASON_PAGE_NOT_PRESENT:
254		/* page is swapped out by the host. */
255		prev_state = exception_enter();
256		kvm_async_pf_task_wait((u32)address, !user_mode(regs));
257		exception_exit(prev_state);
258		break;
259	case KVM_PV_REASON_PAGE_READY:
260		rcu_irq_enter();
261		kvm_async_pf_task_wake((u32)address);
262		rcu_irq_exit();
263		break;
264	}
265}
266NOKPROBE_SYMBOL(do_async_page_fault);
267
268static void __init paravirt_ops_setup(void)
269{
270	pv_info.name = "KVM";
271
272	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
273		pv_ops.cpu.io_delay = kvm_io_delay;
274
275#ifdef CONFIG_X86_IO_APIC
276	no_timer_check = 1;
277#endif
278}
279
280static void kvm_register_steal_time(void)
281{
282	int cpu = smp_processor_id();
283	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
284
285	if (!has_steal_clock)
286		return;
287
288	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
289	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
290		cpu, (unsigned long long) slow_virt_to_phys(st));
291}
292
293static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
294
295static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
296{
297	/*
298	 * This relies on __test_and_clear_bit to modify the memory
299	 * in a way that is atomic with respect to the local CPU.
300	 * The hypervisor only accesses this memory from the local CPU so
301	 * there's no need for lock or memory barriers.
302	 * An optimization barrier is implied in apic write.
303	 */
304	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
305		return;
306	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
307}
308
309static void kvm_guest_cpu_init(void)
310{
311	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
312		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
313
314#ifdef CONFIG_PREEMPTION
315		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
316#endif
317		pa |= KVM_ASYNC_PF_ENABLED;
318
319		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
320			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
321
322		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
323		__this_cpu_write(apf_reason.enabled, 1);
324		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
325		       smp_processor_id());
326	}
327
328	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
329		unsigned long pa;
330		/* Size alignment is implied but just to make it explicit. */
331		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
332		__this_cpu_write(kvm_apic_eoi, 0);
333		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
334			| KVM_MSR_ENABLED;
335		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
336	}
337
338	if (has_steal_clock)
339		kvm_register_steal_time();
340}
341
342static void kvm_pv_disable_apf(void)
343{
344	if (!__this_cpu_read(apf_reason.enabled))
345		return;
346
347	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
348	__this_cpu_write(apf_reason.enabled, 0);
349
350	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
351	       smp_processor_id());
352}
353
354static void kvm_pv_guest_cpu_reboot(void *unused)
355{
356	/*
357	 * We disable PV EOI before we load a new kernel by kexec,
358	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
359	 * New kernel can re-enable when it boots.
360	 */
361	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
362		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
363	kvm_pv_disable_apf();
364	kvm_disable_steal_time();
365}
366
367static int kvm_pv_reboot_notify(struct notifier_block *nb,
368				unsigned long code, void *unused)
369{
370	if (code == SYS_RESTART)
371		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
372	return NOTIFY_DONE;
373}
374
375static struct notifier_block kvm_pv_reboot_nb = {
376	.notifier_call = kvm_pv_reboot_notify,
377};
378
379static u64 kvm_steal_clock(int cpu)
380{
381	u64 steal;
382	struct kvm_steal_time *src;
383	int version;
384
385	src = &per_cpu(steal_time, cpu);
386	do {
387		version = src->version;
388		virt_rmb();
389		steal = src->steal;
390		virt_rmb();
391	} while ((version & 1) || (version != src->version));
392
393	return steal;
394}
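
The version/retry loop above is the reader half of a seqcount-style protocol: the host bumps version to an odd value before updating the record and back to an even value afterwards, so the guest retries whenever it sees an odd version or the version changes across the read. Below is a stand-alone sketch of the same pattern; it is illustrative only, using C11 atomics and an invented model_* writer where the kernel relies on virt_rmb() and the KVM host acting as the writer.

/* Illustrative sketch of the version/retry read, not kernel code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct model_steal_time {
	_Atomic uint32_t version;	/* odd while the writer is mid-update */
	_Atomic uint64_t steal;
};

static struct model_steal_time model_st;

static uint64_t model_read_steal(struct model_steal_time *src)
{
	uint32_t version;
	uint64_t steal;

	do {
		version = atomic_load(&src->version);
		steal = atomic_load(&src->steal);
	} while ((version & 1) || version != atomic_load(&src->version));

	return steal;
}

/* Writer side (done by the host for the real MSR-registered area). */
static void model_write_steal(struct model_steal_time *dst, uint64_t steal)
{
	atomic_fetch_add(&dst->version, 1);	/* -> odd: update in progress */
	atomic_store(&dst->steal, steal);
	atomic_fetch_add(&dst->version, 1);	/* -> even: update complete */
}

int main(void)
{
	model_write_steal(&model_st, 123);
	printf("steal = %llu\n", (unsigned long long)model_read_steal(&model_st));
	return 0;
}
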
395
396void kvm_disable_steal_time(void)
397{
398	if (!has_steal_clock)
399		return;
400
401	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
402}
403
404static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
405{
406	early_set_memory_decrypted((unsigned long) ptr, size);
407}
408
409/*
410 * Iterate through all possible CPUs and map the memory region pointed
411 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
412 *
413 * Note: we iterate through all possible CPUs to ensure that CPUs
414 * hotplugged will have their per-cpu variable already mapped as
415 * decrypted.
416 */
417static void __init sev_map_percpu_data(void)
418{
419	int cpu;
420
421	if (!sev_active())
422		return;
423
424	for_each_possible_cpu(cpu) {
425		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
426		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
427		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
428	}
429}
430
431#ifdef CONFIG_SMP
432#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
433
434static void __send_ipi_mask(const struct cpumask *mask, int vector)
435{
436	unsigned long flags;
437	int cpu, apic_id, icr;
438	int min = 0, max = 0;
439#ifdef CONFIG_X86_64
440	__uint128_t ipi_bitmap = 0;
441#else
442	u64 ipi_bitmap = 0;
443#endif
444	long ret;
445
446	if (cpumask_empty(mask))
447		return;
448
449	local_irq_save(flags);
450
451	switch (vector) {
452	default:
453		icr = APIC_DM_FIXED | vector;
454		break;
455	case NMI_VECTOR:
456		icr = APIC_DM_NMI;
457		break;
458	}
459
460	for_each_cpu(cpu, mask) {
461		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
462		if (!ipi_bitmap) {
463			min = max = apic_id;
464		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
465			ipi_bitmap <<= min - apic_id;
466			min = apic_id;
467		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
468			max = apic_id < max ? max : apic_id;
469		} else {
470			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
471				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
472			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
473			min = max = apic_id;
474			ipi_bitmap = 0;
475		}
476		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
477	}
478
479	if (ipi_bitmap) {
480		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
481			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
482		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
483	}
484
485	local_irq_restore(flags);
486}
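
The loop above packs destination APIC IDs into a bitmap of at most KVM_IPI_CLUSTER_SIZE bits anchored at the lowest ID seen so far, and issues one KVM_HC_SEND_IPI hypercall per window. The following stand-alone sketch shows that batching in simplified form; it is illustrative only (model_* names are invented), it never re-anchors an existing window downward the way __send_ipi_mask() can, and it just prints where the kernel would issue the hypercall.

/* Illustrative sketch of the PV IPI bitmap batching, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_CLUSTER_SIZE 128		/* 2 * BITS_PER_LONG on x86-64 */

static void model_flush(uint64_t lo, uint64_t hi, int min)
{
	/* The kernel issues kvm_hypercall4(KVM_HC_SEND_IPI, lo, hi, min, icr) here. */
	printf("KVM_HC_SEND_IPI lo=%#llx hi=%#llx min=%d\n",
	       (unsigned long long)lo, (unsigned long long)hi, min);
}

static void model_send_ipi(const int *apic_ids, int count)
{
	uint64_t bitmap[2] = { 0, 0 };
	int min = 0, have = 0;

	for (int i = 0; i < count; i++) {
		int id = apic_ids[i];

		if (!have) {
			min = id;
			have = 1;
		} else if (id < min || id >= min + MODEL_CLUSTER_SIZE) {
			/* Doesn't fit in the current 128-bit window: flush it. */
			model_flush(bitmap[0], bitmap[1], min);
			bitmap[0] = bitmap[1] = 0;
			min = id;
		}
		bitmap[(id - min) / 64] |= 1ull << ((id - min) % 64);
	}
	if (have)
		model_flush(bitmap[0], bitmap[1], min);
}

int main(void)
{
	int ids[] = { 0, 1, 2, 130, 131 };	/* second cluster forces a flush */

	model_send_ipi(ids, 5);
	return 0;
}
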
487
488static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
489{
490	__send_ipi_mask(mask, vector);
491}
492
493static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
494{
495	unsigned int this_cpu = smp_processor_id();
496	struct cpumask new_mask;
497	const struct cpumask *local_mask;
498
499	cpumask_copy(&new_mask, mask);
500	cpumask_clear_cpu(this_cpu, &new_mask);
501	local_mask = &new_mask;
502	__send_ipi_mask(local_mask, vector);
503}
504
505/*
506 * Set the IPI entry points
507 */
508static void kvm_setup_pv_ipi(void)
509{
510	apic->send_IPI_mask = kvm_send_ipi_mask;
511	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
512	pr_info("KVM setup pv IPIs\n");
513}
514
515static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
516{
517	int cpu;
518
519	native_send_call_func_ipi(mask);
520
521	/* Make sure other vCPUs get a chance to run if they need to. */
522	for_each_cpu(cpu, mask) {
523		if (vcpu_is_preempted(cpu)) {
524			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
525			break;
526		}
527	}
528}
529
530static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
531{
532	native_smp_prepare_cpus(max_cpus);
533	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
534		static_branch_disable(&virt_spin_lock_key);
535}
536
537static void __init kvm_smp_prepare_boot_cpu(void)
538{
539	/*
540	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
541	 * shares the guest physical address with the hypervisor.
542	 */
543	sev_map_percpu_data();
544
545	kvm_guest_cpu_init();
546	native_smp_prepare_boot_cpu();
547	kvm_spinlock_init();
548}
549
550static void kvm_guest_cpu_offline(void)
551{
552	kvm_disable_steal_time();
553	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
554		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
555	kvm_pv_disable_apf();
556	apf_task_wake_all();
557}
558
559static int kvm_cpu_online(unsigned int cpu)
560{
561	local_irq_disable();
562	kvm_guest_cpu_init();
563	local_irq_enable();
564	return 0;
565}
566
567static int kvm_cpu_down_prepare(unsigned int cpu)
568{
569	local_irq_disable();
570	kvm_guest_cpu_offline();
571	local_irq_enable();
572	return 0;
573}
574#endif
575
576static void __init kvm_apf_trap_init(void)
577{
578	update_intr_gate(X86_TRAP_PF, async_page_fault);
579}
580
581static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
582
583static void kvm_flush_tlb_others(const struct cpumask *cpumask,
584			const struct flush_tlb_info *info)
585{
586	u8 state;
587	int cpu;
588	struct kvm_steal_time *src;
589	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
590
591	cpumask_copy(flushmask, cpumask);
592	/*
593	 * We have to flush only the online vCPUs, and queue flush_on_enter
594	 * for preempted vCPUs.
595	 */
596	for_each_cpu(cpu, flushmask) {
597		src = &per_cpu(steal_time, cpu);
598		state = READ_ONCE(src->preempted);
599		if ((state & KVM_VCPU_PREEMPTED)) {
600			if (try_cmpxchg(&src->preempted, &state,
601					state | KVM_VCPU_FLUSH_TLB))
602				__cpumask_clear_cpu(cpu, flushmask);
603		}
604	}
605
606	native_flush_tlb_others(flushmask, info);
607}
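
kvm_flush_tlb_others() only drops a CPU from the flush mask when it can atomically move the preempted byte from "preempted" to "preempted | flush-on-enter"; if the vCPU is scheduled back in meanwhile, the cmpxchg fails and the flush falls back to the IPI path. A minimal sketch of that race-free handoff, illustrative only, using a C11 compare-exchange and invented model_* names in place of try_cmpxchg() on the shared steal_time byte:

/* Illustrative sketch, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_VCPU_PREEMPTED	(1 << 0)
#define MODEL_VCPU_FLUSH_TLB	(1 << 1)

/*
 * Returns true if the flush could be queued for a preempted vCPU and the
 * sender may skip the IPI; false means flush via IPI as usual.
 */
static bool model_queue_flush(_Atomic uint8_t *preempted)
{
	uint8_t state = atomic_load(preempted);

	if (!(state & MODEL_VCPU_PREEMPTED))
		return false;

	/* Fails if the byte changed under us, e.g. the vCPU ran again. */
	return atomic_compare_exchange_strong(preempted, &state,
					      state | MODEL_VCPU_FLUSH_TLB);
}

int main(void)
{
	_Atomic uint8_t preempted = MODEL_VCPU_PREEMPTED;

	printf("queued: %d, state now: %#x\n",
	       model_queue_flush(&preempted),
	       (unsigned)atomic_load(&preempted));
	return 0;
}
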
608
609static void __init kvm_guest_init(void)
610{
611	int i;
612
613	paravirt_ops_setup();
614	register_reboot_notifier(&kvm_pv_reboot_nb);
615	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
616		raw_spin_lock_init(&async_pf_sleepers[i].lock);
617	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
618		x86_init.irqs.trap_init = kvm_apf_trap_init;
619
620	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
621		has_steal_clock = 1;
622		pv_ops.time.steal_clock = kvm_steal_clock;
623	}
624
625	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
626	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
627	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
628		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
629		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
630	}
631
632	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
633		apic_set_eoi_write(kvm_guest_apic_eoi_write);
634
635#ifdef CONFIG_SMP
636	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
637	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
638	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
639	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
640	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
641		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
642		pr_info("KVM setup pv sched yield\n");
643	}
644	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
645				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
646		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
647#else
648	sev_map_percpu_data();
649	kvm_guest_cpu_init();
650#endif
651
652	/*
653	 * Hard lockup detection is enabled by default. Disable it, as guests
654	 * can get false positives too easily, for example if the host is
655	 * overcommitted.
656	 */
657	hardlockup_detector_disable();
658}
659
660static noinline uint32_t __kvm_cpuid_base(void)
661{
662	if (boot_cpu_data.cpuid_level < 0)
663		return 0;	/* So we don't blow up on old processors */
664
665	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
666		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
667
668	return 0;
669}
670
671static inline uint32_t kvm_cpuid_base(void)
672{
673	static int kvm_cpuid_base = -1;
674
675	if (kvm_cpuid_base == -1)
676		kvm_cpuid_base = __kvm_cpuid_base();
677
678	return kvm_cpuid_base;
679}
680
681bool kvm_para_available(void)
682{
683	return kvm_cpuid_base() != 0;
684}
685EXPORT_SYMBOL_GPL(kvm_para_available);
686
687unsigned int kvm_arch_para_features(void)
688{
689	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
690}
691
692unsigned int kvm_arch_para_hints(void)
693{
694	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
695}
696EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
697
698static uint32_t __init kvm_detect(void)
699{
700	return kvm_cpuid_base();
701}
702
703static void __init kvm_apic_init(void)
704{
705#if defined(CONFIG_SMP)
706	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
707		kvm_setup_pv_ipi();
708#endif
709}
710
711static void __init kvm_init_platform(void)
712{
713	kvmclock_init();
714	x86_platform.apic_post_init = kvm_apic_init;
715}
716
717const __initconst struct hypervisor_x86 x86_hyper_kvm = {
718	.name			= "KVM",
719	.detect			= kvm_detect,
720	.type			= X86_HYPER_KVM,
721	.init.guest_late_init	= kvm_guest_init,
722	.init.x2apic_available	= kvm_para_available,
723	.init.init_platform	= kvm_init_platform,
724};
725
726static __init int activate_jump_labels(void)
727{
728	if (has_steal_clock) {
729		static_key_slow_inc(&paravirt_steal_enabled);
730		if (steal_acc)
731			static_key_slow_inc(&paravirt_steal_rq_enabled);
732	}
733
734	return 0;
735}
736arch_initcall(activate_jump_labels);
737
738static __init int kvm_setup_pv_tlb_flush(void)
739{
740	int cpu;
741
742	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
743	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
744	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
745		for_each_possible_cpu(cpu) {
746			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
747				GFP_KERNEL, cpu_to_node(cpu));
748		}
749		pr_info("KVM setup pv remote TLB flush\n");
750	}
751
752	return 0;
753}
754arch_initcall(kvm_setup_pv_tlb_flush);
755
756#ifdef CONFIG_PARAVIRT_SPINLOCKS
757
758/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
759static void kvm_kick_cpu(int cpu)
760{
761	int apicid;
762	unsigned long flags = 0;
763
764	apicid = per_cpu(x86_cpu_to_apicid, cpu);
765	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
766}
767
768#include <asm/qspinlock.h>
769
770static void kvm_wait(u8 *ptr, u8 val)
771{
772	unsigned long flags;
773
774	if (in_nmi())
775		return;
776
777	local_irq_save(flags);
778
779	if (READ_ONCE(*ptr) != val)
780		goto out;
781
782	/*
783	 * Halt until it's our turn and we get kicked. Note that we do a safe halt
784	 * in the irq-enabled case to avoid hanging when the lock info is overwritten
785	 * in the irq spinlock slowpath and no spurious interrupt occurs to save us.
786	 */
787	if (arch_irqs_disabled_flags(flags))
788		halt();
789	else
790		safe_halt();
791
792out:
793	local_irq_restore(flags);
794}
795
796#ifdef CONFIG_X86_32
797__visible bool __kvm_vcpu_is_preempted(long cpu)
798{
799	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
800
801	return !!(src->preempted & KVM_VCPU_PREEMPTED);
802}
803PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
804
805#else
806
807#include <asm/asm-offsets.h>
808
809extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
810
811/*
812 * Hand-optimized version for x86-64 that avoids saving and restoring 8
813 * 64-bit registers to/from the stack.
814 */
815asm(
816".pushsection .text;"
817".global __raw_callee_save___kvm_vcpu_is_preempted;"
818".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
819"__raw_callee_save___kvm_vcpu_is_preempted:"
820"movq	__per_cpu_offset(,%rdi,8), %rax;"
821"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
822"setne	%al;"
823"ret;"
824".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
825".popsection");
826
827#endif
828
829/*
830 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
831 */
832void __init kvm_spinlock_init(void)
833{
834	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
835	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
836		return;
837
838	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
839		return;
840
841	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
842	if (num_possible_cpus() == 1)
843		return;
844
845	__pv_init_lock_hash();
846	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
847	pv_ops.lock.queued_spin_unlock =
848		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
849	pv_ops.lock.wait = kvm_wait;
850	pv_ops.lock.kick = kvm_kick_cpu;
851
852	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
853		pv_ops.lock.vcpu_is_preempted =
854			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
855	}
856}
857
858#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
859
860#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
861
862static void kvm_disable_host_haltpoll(void *i)
863{
864	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
865}
866
867static void kvm_enable_host_haltpoll(void *i)
868{
869	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
870}
871
872void arch_haltpoll_enable(unsigned int cpu)
873{
874	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
875		pr_err_once("kvm: host does not support poll control\n");
876		pr_err_once("kvm: host upgrade recommended\n");
877		return;
878	}
879
880	/* Enabling guest halt polling disables host halt polling */
881	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
882}
883EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
884
885void arch_haltpoll_disable(unsigned int cpu)
886{
887	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
888		return;
889
890	/* Disabling guest halt polling re-enables host halt polling */
891	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
892}
893EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
894#endif
v3.1
 
  1/*
  2 * KVM paravirt_ops implementation
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write to the Free Software
 16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 17 *
 18 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 19 * Copyright IBM Corporation, 2007
 20 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 21 */
 22
 23#include <linux/module.h>
 24#include <linux/kernel.h>
 25#include <linux/kvm_para.h>
 26#include <linux/cpu.h>
 27#include <linux/mm.h>
 28#include <linux/highmem.h>
 29#include <linux/hardirq.h>
 30#include <linux/notifier.h>
 31#include <linux/reboot.h>
 32#include <linux/hash.h>
 33#include <linux/sched.h>
 34#include <linux/slab.h>
 35#include <linux/kprobes.h>
 36#include <asm/timer.h>
 37#include <asm/cpu.h>
 38#include <asm/traps.h>
 39#include <asm/desc.h>
 40#include <asm/tlbflush.h>
 41
 42#define MMU_QUEUE_SIZE 1024
 43
 44static int kvmapf = 1;
 45
 46static int parse_no_kvmapf(char *arg)
 47{
 48        kvmapf = 0;
 49        return 0;
 50}
 51
 52early_param("no-kvmapf", parse_no_kvmapf);
 53
 54static int steal_acc = 1;
 55static int parse_no_stealacc(char *arg)
 56{
 57        steal_acc = 0;
 58        return 0;
 59}
 60
 61early_param("no-steal-acc", parse_no_stealacc);
 62
 63struct kvm_para_state {
 64	u8 mmu_queue[MMU_QUEUE_SIZE];
 65	int mmu_queue_len;
 66};
 67
 68static DEFINE_PER_CPU(struct kvm_para_state, para_state);
 69static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 70static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 71static int has_steal_clock = 0;
 72
 73static struct kvm_para_state *kvm_para_state(void)
 74{
 75	return &per_cpu(para_state, raw_smp_processor_id());
 76}
 77
 78/*
 79 * No need for any "IO delay" on KVM
 80 */
 81static void kvm_io_delay(void)
 82{
 83}
 84
 85#define KVM_TASK_SLEEP_HASHBITS 8
 86#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
 87
 88struct kvm_task_sleep_node {
 89	struct hlist_node link;
 90	wait_queue_head_t wq;
 91	u32 token;
 92	int cpu;
 93	bool halted;
 94	struct mm_struct *mm;
 95};
 96
 97static struct kvm_task_sleep_head {
 98	spinlock_t lock;
 99	struct hlist_head list;
100} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
101
102static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
103						  u32 token)
104{
105	struct hlist_node *p;
106
107	hlist_for_each(p, &b->list) {
108		struct kvm_task_sleep_node *n =
109			hlist_entry(p, typeof(*n), link);
110		if (n->token == token)
111			return n;
112	}
113
114	return NULL;
115}
116
117void kvm_async_pf_task_wait(u32 token)
118{
119	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
120	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
121	struct kvm_task_sleep_node n, *e;
122	DEFINE_WAIT(wait);
123	int cpu, idle;
124
125	cpu = get_cpu();
126	idle = idle_cpu(cpu);
127	put_cpu();
128
129	spin_lock(&b->lock);
130	e = _find_apf_task(b, token);
131	if (e) {
132		/* dummy entry exists -> wake up was delivered ahead of PF */
133		hlist_del(&e->link);
134		kfree(e);
135		spin_unlock(&b->lock);
136		return;
137	}
138
139	n.token = token;
140	n.cpu = smp_processor_id();
141	n.mm = current->active_mm;
142	n.halted = idle || preempt_count() > 1;
143	atomic_inc(&n.mm->mm_count);
144	init_waitqueue_head(&n.wq);
145	hlist_add_head(&n.link, &b->list);
146	spin_unlock(&b->lock);
147
148	for (;;) {
149		if (!n.halted)
150			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
151		if (hlist_unhashed(&n.link))
152			break;
153
154		if (!n.halted) {
155			local_irq_enable();
156			schedule();
157			local_irq_disable();
158		} else {
159			/*
160			 * We cannot reschedule. So halt.
161			 */
162			native_safe_halt();
163			local_irq_disable();
164		}
165	}
166	if (!n.halted)
167		finish_wait(&n.wq, &wait);
168
169	return;
170}
171EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
172
173static void apf_task_wake_one(struct kvm_task_sleep_node *n)
174{
175	hlist_del_init(&n->link);
176	if (!n->mm)
177		return;
178	mmdrop(n->mm);
179	if (n->halted)
180		smp_send_reschedule(n->cpu);
181	else if (waitqueue_active(&n->wq))
182		wake_up(&n->wq);
183}
184
185static void apf_task_wake_all(void)
186{
187	int i;
188
189	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
190		struct hlist_node *p, *next;
191		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
192		spin_lock(&b->lock);
193		hlist_for_each_safe(p, next, &b->list) {
194			struct kvm_task_sleep_node *n =
195				hlist_entry(p, typeof(*n), link);
196			if (n->cpu == smp_processor_id())
197				apf_task_wake_one(n);
198		}
199		spin_unlock(&b->lock);
200	}
201}
202
203void kvm_async_pf_task_wake(u32 token)
204{
205	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
206	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
207	struct kvm_task_sleep_node *n;
208
209	if (token == ~0) {
210		apf_task_wake_all();
211		return;
212	}
213
214again:
215	spin_lock(&b->lock);
216	n = _find_apf_task(b, token);
217	if (!n) {
218		/*
219		 * async PF was not yet handled.
220		 * Add dummy entry for the token.
221		 */
222		n = kmalloc(sizeof(*n), GFP_ATOMIC);
223		if (!n) {
224			/*
225			 * Allocation failed! Busy wait while other cpu
226			 * handles async PF.
227			 */
228			spin_unlock(&b->lock);
229			cpu_relax();
230			goto again;
231		}
232		n->token = token;
233		n->cpu = smp_processor_id();
234		n->mm = NULL;
235		init_waitqueue_head(&n->wq);
236		hlist_add_head(&n->link, &b->list);
237	} else
238		apf_task_wake_one(n);
239	spin_unlock(&b->lock);
240	return;
241}
242EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
243
244u32 kvm_read_and_reset_pf_reason(void)
245{
246	u32 reason = 0;
247
248	if (__get_cpu_var(apf_reason).enabled) {
249		reason = __get_cpu_var(apf_reason).reason;
250		__get_cpu_var(apf_reason).reason = 0;
251	}
252
253	return reason;
254}
255EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
256
257dotraplinkage void __kprobes
258do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
259{
260	switch (kvm_read_and_reset_pf_reason()) {
261	default:
262		do_page_fault(regs, error_code);
263		break;
264	case KVM_PV_REASON_PAGE_NOT_PRESENT:
265		/* page is swapped out by the host. */
266		kvm_async_pf_task_wait((u32)read_cr2());
267		break;
268	case KVM_PV_REASON_PAGE_READY:
269		kvm_async_pf_task_wake((u32)read_cr2());
270		break;
271	}
272}
273
274static void kvm_mmu_op(void *buffer, unsigned len)
275{
276	int r;
277	unsigned long a1, a2;
278
279	do {
280		a1 = __pa(buffer);
281		a2 = 0;   /* on i386 __pa() always returns <4G */
282		r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
283		buffer += r;
284		len -= r;
285	} while (len);
286}
287
288static void mmu_queue_flush(struct kvm_para_state *state)
289{
290	if (state->mmu_queue_len) {
291		kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
292		state->mmu_queue_len = 0;
293	}
294}
295
296static void kvm_deferred_mmu_op(void *buffer, int len)
297{
298	struct kvm_para_state *state = kvm_para_state();
299
300	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
301		kvm_mmu_op(buffer, len);
302		return;
303	}
304	if (state->mmu_queue_len + len > sizeof state->mmu_queue)
305		mmu_queue_flush(state);
306	memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
307	state->mmu_queue_len += len;
308}
309
310static void kvm_mmu_write(void *dest, u64 val)
311{
312	__u64 pte_phys;
313	struct kvm_mmu_op_write_pte wpte;
314
315#ifdef CONFIG_HIGHPTE
316	struct page *page;
317	unsigned long dst = (unsigned long) dest;
318
319	page = kmap_atomic_to_page(dest);
320	pte_phys = page_to_pfn(page);
321	pte_phys <<= PAGE_SHIFT;
322	pte_phys += (dst & ~(PAGE_MASK));
323#else
324	pte_phys = (unsigned long)__pa(dest);
325#endif
326	wpte.header.op = KVM_MMU_OP_WRITE_PTE;
327	wpte.pte_val = val;
328	wpte.pte_phys = pte_phys;
329
330	kvm_deferred_mmu_op(&wpte, sizeof wpte);
331}
332
333/*
334 * We only need to hook operations that are MMU writes.  We hook these so that
335 * we can use lazy MMU mode to batch these operations.  We could probably
336 * improve the performance of the host code if we used some of the information
337 * here to simplify processing of batched writes.
338 */
339static void kvm_set_pte(pte_t *ptep, pte_t pte)
340{
341	kvm_mmu_write(ptep, pte_val(pte));
342}
343
344static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
345			   pte_t *ptep, pte_t pte)
346{
347	kvm_mmu_write(ptep, pte_val(pte));
348}
349
350static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
351{
352	kvm_mmu_write(pmdp, pmd_val(pmd));
353}
354
355#if PAGETABLE_LEVELS >= 3
356#ifdef CONFIG_X86_PAE
357static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
358{
359	kvm_mmu_write(ptep, pte_val(pte));
360}
361
362static void kvm_pte_clear(struct mm_struct *mm,
363			  unsigned long addr, pte_t *ptep)
364{
365	kvm_mmu_write(ptep, 0);
366}
367
368static void kvm_pmd_clear(pmd_t *pmdp)
369{
370	kvm_mmu_write(pmdp, 0);
371}
372#endif
373
374static void kvm_set_pud(pud_t *pudp, pud_t pud)
375{
376	kvm_mmu_write(pudp, pud_val(pud));
377}
378
379#if PAGETABLE_LEVELS == 4
380static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
381{
382	kvm_mmu_write(pgdp, pgd_val(pgd));
383}
384#endif
385#endif /* PAGETABLE_LEVELS >= 3 */
386
387static void kvm_flush_tlb(void)
388{
389	struct kvm_mmu_op_flush_tlb ftlb = {
390		.header.op = KVM_MMU_OP_FLUSH_TLB,
391	};
392
393	kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
394}
395
396static void kvm_release_pt(unsigned long pfn)
397{
398	struct kvm_mmu_op_release_pt rpt = {
399		.header.op = KVM_MMU_OP_RELEASE_PT,
400		.pt_phys = (u64)pfn << PAGE_SHIFT,
401	};
402
403	kvm_mmu_op(&rpt, sizeof rpt);
404}
405
406static void kvm_enter_lazy_mmu(void)
407{
408	paravirt_enter_lazy_mmu();
409}
410
411static void kvm_leave_lazy_mmu(void)
412{
413	struct kvm_para_state *state = kvm_para_state();
414
415	mmu_queue_flush(state);
416	paravirt_leave_lazy_mmu();
417}
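
While a CPU is in lazy MMU mode, kvm_deferred_mmu_op() appends encoded operations to the fixed per-CPU mmu_queue and only issues a single KVM_HC_MMU_OP hypercall when the queue would overflow or when lazy mode is left. Below is a small stand-alone sketch of that batching pattern; it is illustrative only, the model_* names are invented, and only the queue size and the flush-on-overflow/flush-on-leave behaviour are taken from the code above.

/* Illustrative sketch of the lazy-MMU batching, not kernel code. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MODEL_QUEUE_SIZE 1024

struct model_queue {
	unsigned char buf[MODEL_QUEUE_SIZE];
	size_t len;
};

static void model_flush(struct model_queue *q)
{
	if (!q->len)
		return;
	/* The kernel makes one KVM_HC_MMU_OP hypercall for the whole batch here. */
	printf("flush %zu bytes of queued MMU ops\n", q->len);
	q->len = 0;
}

/* Append one encoded op; flush first if it would not fit. */
static void model_queue_op(struct model_queue *q, const void *op, size_t len)
{
	if (q->len + len > sizeof(q->buf))
		model_flush(q);
	memcpy(q->buf + q->len, op, len);
	q->len += len;
}

int main(void)
{
	static struct model_queue q;
	unsigned char op[48] = { 0 };	/* stand-in for an encoded kvm_mmu_op_* struct */

	for (int i = 0; i < 30; i++)
		model_queue_op(&q, op, sizeof(op));	/* fills up and auto-flushes */
	model_flush(&q);				/* leaving lazy MMU mode */
	return 0;
}
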
418
419static void __init paravirt_ops_setup(void)
420{
421	pv_info.name = "KVM";
422	pv_info.paravirt_enabled = 1;
423
424	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
425		pv_cpu_ops.io_delay = kvm_io_delay;
426
427	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
428		pv_mmu_ops.set_pte = kvm_set_pte;
429		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
430		pv_mmu_ops.set_pmd = kvm_set_pmd;
431#if PAGETABLE_LEVELS >= 3
432#ifdef CONFIG_X86_PAE
433		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
434		pv_mmu_ops.pte_clear = kvm_pte_clear;
435		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
436#endif
437		pv_mmu_ops.set_pud = kvm_set_pud;
438#if PAGETABLE_LEVELS == 4
439		pv_mmu_ops.set_pgd = kvm_set_pgd;
440#endif
441#endif
442		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
443		pv_mmu_ops.release_pte = kvm_release_pt;
444		pv_mmu_ops.release_pmd = kvm_release_pt;
445		pv_mmu_ops.release_pud = kvm_release_pt;
446
447		pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
448		pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
449	}
450#ifdef CONFIG_X86_IO_APIC
451	no_timer_check = 1;
452#endif
453}
454
455static void kvm_register_steal_time(void)
456{
457	int cpu = smp_processor_id();
458	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
459
460	if (!has_steal_clock)
461		return;
462
463	memset(st, 0, sizeof(*st));
464
465	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
466	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
467		cpu, __pa(st));
468}
469
470void __cpuinit kvm_guest_cpu_init(void)
471{
472	if (!kvm_para_available())
473		return;
474
475	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
476		u64 pa = __pa(&__get_cpu_var(apf_reason));
477
478#ifdef CONFIG_PREEMPT
479		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
480#endif
481		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
482		__get_cpu_var(apf_reason).enabled = 1;
483		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
484		       smp_processor_id());
485	}
486
487	if (has_steal_clock)
488		kvm_register_steal_time();
489}
490
491static void kvm_pv_disable_apf(void *unused)
492{
493	if (!__get_cpu_var(apf_reason).enabled)
494		return;
495
496	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
497	__get_cpu_var(apf_reason).enabled = 0;
498
499	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
500	       smp_processor_id());
501}
502
503static int kvm_pv_reboot_notify(struct notifier_block *nb,
504				unsigned long code, void *unused)
505{
506	if (code == SYS_RESTART)
507		on_each_cpu(kvm_pv_disable_apf, NULL, 1);
508	return NOTIFY_DONE;
509}
510
511static struct notifier_block kvm_pv_reboot_nb = {
512	.notifier_call = kvm_pv_reboot_notify,
513};
514
515static u64 kvm_steal_clock(int cpu)
516{
517	u64 steal;
518	struct kvm_steal_time *src;
519	int version;
520
521	src = &per_cpu(steal_time, cpu);
522	do {
523		version = src->version;
524		rmb();
525		steal = src->steal;
526		rmb();
527	} while ((version & 1) || (version != src->version));
528
529	return steal;
530}
531
532void kvm_disable_steal_time(void)
533{
534	if (!has_steal_clock)
535		return;
536
537	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
538}
539
540#ifdef CONFIG_SMP
541static void __init kvm_smp_prepare_boot_cpu(void)
542{
543#ifdef CONFIG_KVM_CLOCK
544	WARN_ON(kvm_register_clock("primary cpu clock"));
545#endif
546	kvm_guest_cpu_init();
547	native_smp_prepare_boot_cpu();
548}
549
550static void __cpuinit kvm_guest_cpu_online(void *dummy)
551{
552	kvm_guest_cpu_init();
553}
554
555static void kvm_guest_cpu_offline(void *dummy)
556{
557	kvm_disable_steal_time();
558	kvm_pv_disable_apf(NULL);
559	apf_task_wake_all();
560}
561
562static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
563				    unsigned long action, void *hcpu)
564{
565	int cpu = (unsigned long)hcpu;
566	switch (action) {
567	case CPU_ONLINE:
568	case CPU_DOWN_FAILED:
569	case CPU_ONLINE_FROZEN:
570		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
571		break;
572	case CPU_DOWN_PREPARE:
573	case CPU_DOWN_PREPARE_FROZEN:
574		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
575		break;
576	default:
577		break;
578	}
579	return NOTIFY_OK;
580}
581
582static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
583        .notifier_call  = kvm_cpu_notify,
584};
585#endif
586
587static void __init kvm_apf_trap_init(void)
588{
589	set_intr_gate(14, &async_page_fault);
590}
591
592void __init kvm_guest_init(void)
593{
594	int i;
595
596	if (!kvm_para_available())
597		return;
598
599	paravirt_ops_setup();
600	register_reboot_notifier(&kvm_pv_reboot_nb);
601	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
602		spin_lock_init(&async_pf_sleepers[i].lock);
603	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
604		x86_init.irqs.trap_init = kvm_apf_trap_init;
605
606	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
607		has_steal_clock = 1;
608		pv_time_ops.steal_clock = kvm_steal_clock;
609	}
610
611#ifdef CONFIG_SMP
612	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
613	register_cpu_notifier(&kvm_cpu_notifier);
614#else
615	kvm_guest_cpu_init();
616#endif
617}
618
619static __init int activate_jump_labels(void)
620{
621	if (has_steal_clock) {
622		jump_label_inc(&paravirt_steal_enabled);
623		if (steal_acc)
624			jump_label_inc(&paravirt_steal_rq_enabled);
625	}
626
627	return 0;
628}
629arch_initcall(activate_jump_labels);