1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * KVM Microsoft Hyper-V emulation
4 *
5 * derived from arch/x86/kvm/x86.c
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright (C) 2008 Qumranet, Inc.
9 * Copyright IBM Corporation, 2008
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
12 *
13 * Authors:
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 * Amit Shah <amit.shah@qumranet.com>
17 * Ben-Ami Yassour <benami@il.ibm.com>
18 * Andrey Smetanin <asmetanin@virtuozzo.com>
19 */
20
21#include "x86.h"
22#include "lapic.h"
23#include "ioapic.h"
24#include "cpuid.h"
25#include "hyperv.h"
26
27#include <linux/cpu.h>
28#include <linux/kvm_host.h>
29#include <linux/highmem.h>
30#include <linux/sched/cputime.h>
31#include <linux/eventfd.h>
32
33#include <asm/apicdef.h>
34#include <trace/events/kvm.h>
35
36#include "trace.h"
37#include "irq.h"
38
39#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
40
41static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
42 bool vcpu_kick);
43
44static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
45{
46 return atomic64_read(&synic->sint[sint]);
47}
48
49static inline int synic_get_sint_vector(u64 sint_value)
50{
51 if (sint_value & HV_SYNIC_SINT_MASKED)
52 return -1;
53 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
54}
55
56static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
57 int vector)
58{
59 int i;
60
61 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
62 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
63 return true;
64 }
65 return false;
66}
67
68static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
69 int vector)
70{
71 int i;
72 u64 sint_value;
73
74 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
75 sint_value = synic_read_sint(synic, i);
76 if (synic_get_sint_vector(sint_value) == vector &&
77 sint_value & HV_SYNIC_SINT_AUTO_EOI)
78 return true;
79 }
80 return false;
81}
82
83static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
84 int vector)
85{
86 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
87 return;
88
89 if (synic_has_vector_connected(synic, vector))
90 __set_bit(vector, synic->vec_bitmap);
91 else
92 __clear_bit(vector, synic->vec_bitmap);
93
94 if (synic_has_vector_auto_eoi(synic, vector))
95 __set_bit(vector, synic->auto_eoi_bitmap);
96 else
97 __clear_bit(vector, synic->auto_eoi_bitmap);
98}
99
100static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
101 u64 data, bool host)
102{
103 int vector, old_vector;
104 bool masked;
105
106 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
107 masked = data & HV_SYNIC_SINT_MASKED;
108
109 /*
110 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
111 * default '0x10000' value on boot and this should not #GP. We need to
112 * allow zero-initing the register from host as well.
113 */
114 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
115 return 1;
116 /*
117 * Guest may configure multiple SINTs to use the same vector, so
118 * we maintain a bitmap of vectors handled by synic, and a
119 * bitmap of vectors with auto-eoi behavior. The bitmaps are
120 * updated here, and atomically queried on fast paths.
121 */
122 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
123
124 atomic64_set(&synic->sint[sint], data);
125
126 synic_update_vector(synic, old_vector);
127
128 synic_update_vector(synic, vector);
129
130 /* Load SynIC vectors into EOI exit bitmap */
131 kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
132 return 0;
133}
134
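/*
 * Look up a vCPU by Hyper-V VP index: try the optimistic fast path first
 * (the VP index usually equals the KVM vCPU index), then fall back to a
 * linear scan of all vCPUs.
 */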
135static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
136{
137 struct kvm_vcpu *vcpu = NULL;
138 int i;
139
140 if (vpidx >= KVM_MAX_VCPUS)
141 return NULL;
142
143 vcpu = kvm_get_vcpu(kvm, vpidx);
144 if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
145 return vcpu;
146 kvm_for_each_vcpu(i, vcpu, kvm)
147 if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
148 return vcpu;
149 return NULL;
150}
151
152static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
153{
154 struct kvm_vcpu *vcpu;
155 struct kvm_vcpu_hv_synic *synic;
156
157 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
158 if (!vcpu)
159 return NULL;
160 synic = vcpu_to_synic(vcpu);
161 return (synic->active) ? synic : NULL;
162}
163
164static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
165{
166 struct kvm *kvm = vcpu->kvm;
167 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
168 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
169 struct kvm_vcpu_hv_stimer *stimer;
170 int gsi, idx;
171
172 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
173
174 /* Try to deliver pending Hyper-V SynIC timer messages */
175 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
176 stimer = &hv_vcpu->stimer[idx];
177 if (stimer->msg_pending && stimer->config.enable &&
178 !stimer->config.direct_mode &&
179 stimer->config.sintx == sint)
180 stimer_mark_pending(stimer, false);
181 }
182
183 idx = srcu_read_lock(&kvm->irq_srcu);
184 gsi = atomic_read(&synic->sint_to_gsi[sint]);
185 if (gsi != -1)
186 kvm_notify_acked_gsi(kvm, gsi);
187 srcu_read_unlock(&kvm->irq_srcu, idx);
188}
189
190static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
191{
192 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
193 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
194
195 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
196 hv_vcpu->exit.u.synic.msr = msr;
197 hv_vcpu->exit.u.synic.control = synic->control;
198 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
199 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
200
201 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
202}
203
204static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
205 u32 msr, u64 data, bool host)
206{
207 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
208 int ret;
209
210 if (!synic->active && !host)
211 return 1;
212
213 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
214
215 ret = 0;
216 switch (msr) {
217 case HV_X64_MSR_SCONTROL:
218 synic->control = data;
219 if (!host)
220 synic_exit(synic, msr);
221 break;
222 case HV_X64_MSR_SVERSION:
223 if (!host) {
224 ret = 1;
225 break;
226 }
227 synic->version = data;
228 break;
229 case HV_X64_MSR_SIEFP:
230 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
231 !synic->dont_zero_synic_pages)
232 if (kvm_clear_guest(vcpu->kvm,
233 data & PAGE_MASK, PAGE_SIZE)) {
234 ret = 1;
235 break;
236 }
237 synic->evt_page = data;
238 if (!host)
239 synic_exit(synic, msr);
240 break;
241 case HV_X64_MSR_SIMP:
242 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
243 !synic->dont_zero_synic_pages)
244 if (kvm_clear_guest(vcpu->kvm,
245 data & PAGE_MASK, PAGE_SIZE)) {
246 ret = 1;
247 break;
248 }
249 synic->msg_page = data;
250 if (!host)
251 synic_exit(synic, msr);
252 break;
253 case HV_X64_MSR_EOM: {
254 int i;
255
256 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
257 kvm_hv_notify_acked_sint(vcpu, i);
258 break;
259 }
260 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
261 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
262 break;
263 default:
264 ret = 1;
265 break;
266 }
267 return ret;
268}
269
270static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
271{
272 struct kvm_cpuid_entry2 *entry;
273
274 entry = kvm_find_cpuid_entry(vcpu,
275 HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
276 0);
277 if (!entry)
278 return false;
279
280 return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
281}
282
283static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
284{
285 struct kvm *kvm = vcpu->kvm;
286 struct kvm_hv *hv = &kvm->arch.hyperv;
287
288 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
289 hv->hv_syndbg.control.status =
290 vcpu->run->hyperv.u.syndbg.status;
291 return 1;
292}
293
294static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
295{
296 struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
297 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
298
299 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
300 hv_vcpu->exit.u.syndbg.msr = msr;
301 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
302 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
303 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
304 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
305 vcpu->arch.complete_userspace_io =
306 kvm_hv_syndbg_complete_userspace;
307
308 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
309}
310
311static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
312{
313 struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
314
315 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
316 return 1;
317
318 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
319 vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data);
320 switch (msr) {
321 case HV_X64_MSR_SYNDBG_CONTROL:
322 syndbg->control.control = data;
323 if (!host)
324 syndbg_exit(vcpu, msr);
325 break;
326 case HV_X64_MSR_SYNDBG_STATUS:
327 syndbg->control.status = data;
328 break;
329 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
330 syndbg->control.send_page = data;
331 break;
332 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
333 syndbg->control.recv_page = data;
334 break;
335 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
336 syndbg->control.pending_page = data;
337 if (!host)
338 syndbg_exit(vcpu, msr);
339 break;
340 case HV_X64_MSR_SYNDBG_OPTIONS:
341 syndbg->options = data;
342 break;
343 default:
344 break;
345 }
346
347 return 0;
348}
349
350static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
351{
352 struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
353
354 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
355 return 1;
356
357 switch (msr) {
358 case HV_X64_MSR_SYNDBG_CONTROL:
359 *pdata = syndbg->control.control;
360 break;
361 case HV_X64_MSR_SYNDBG_STATUS:
362 *pdata = syndbg->control.status;
363 break;
364 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
365 *pdata = syndbg->control.send_page;
366 break;
367 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
368 *pdata = syndbg->control.recv_page;
369 break;
370 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
371 *pdata = syndbg->control.pending_page;
372 break;
373 case HV_X64_MSR_SYNDBG_OPTIONS:
374 *pdata = syndbg->options;
375 break;
376 default:
377 break;
378 }
379
380 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
381 vcpu_to_hv_vcpu(vcpu)->vp_index, msr,
382 *pdata);
383
384 return 0;
385}
386
387static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
388 bool host)
389{
390 int ret;
391
392 if (!synic->active && !host)
393 return 1;
394
395 ret = 0;
396 switch (msr) {
397 case HV_X64_MSR_SCONTROL:
398 *pdata = synic->control;
399 break;
400 case HV_X64_MSR_SVERSION:
401 *pdata = synic->version;
402 break;
403 case HV_X64_MSR_SIEFP:
404 *pdata = synic->evt_page;
405 break;
406 case HV_X64_MSR_SIMP:
407 *pdata = synic->msg_page;
408 break;
409 case HV_X64_MSR_EOM:
410 *pdata = 0;
411 break;
412 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
413 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
414 break;
415 default:
416 ret = 1;
417 break;
418 }
419 return ret;
420}
421
422static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
423{
424 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
425 struct kvm_lapic_irq irq;
426 int ret, vector;
427
428 if (sint >= ARRAY_SIZE(synic->sint))
429 return -EINVAL;
430
431 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
432 if (vector < 0)
433 return -ENOENT;
434
435 memset(&irq, 0, sizeof(irq));
436 irq.shorthand = APIC_DEST_SELF;
437 irq.dest_mode = APIC_DEST_PHYSICAL;
438 irq.delivery_mode = APIC_DM_FIXED;
439 irq.vector = vector;
440 irq.level = 1;
441
442 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
443 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
444 return ret;
445}
446
447int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
448{
449 struct kvm_vcpu_hv_synic *synic;
450
451 synic = synic_get(kvm, vpidx);
452 if (!synic)
453 return -EINVAL;
454
455 return synic_set_irq(synic, sint);
456}
457
458void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
459{
460 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
461 int i;
462
463 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
464
465 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
466 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
467 kvm_hv_notify_acked_sint(vcpu, i);
468}
469
470static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
471{
472 struct kvm_vcpu_hv_synic *synic;
473
474 synic = synic_get(kvm, vpidx);
475 if (!synic)
476 return -EINVAL;
477
478 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
479 return -EINVAL;
480
481 atomic_set(&synic->sint_to_gsi[sint], gsi);
482 return 0;
483}
484
485void kvm_hv_irq_routing_update(struct kvm *kvm)
486{
487 struct kvm_irq_routing_table *irq_rt;
488 struct kvm_kernel_irq_routing_entry *e;
489 u32 gsi;
490
491 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
492 lockdep_is_held(&kvm->irq_lock));
493
494 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
495 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
496 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
497 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
498 e->hv_sint.sint, gsi);
499 }
500 }
501}
502
503static void synic_init(struct kvm_vcpu_hv_synic *synic)
504{
505 int i;
506
507 memset(synic, 0, sizeof(*synic));
508 synic->version = HV_SYNIC_VERSION_1;
509 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
510 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
511 atomic_set(&synic->sint_to_gsi[i], -1);
512 }
513}
514
515static u64 get_time_ref_counter(struct kvm *kvm)
516{
517 struct kvm_hv *hv = &kvm->arch.hyperv;
518 struct kvm_vcpu *vcpu;
519 u64 tsc;
520
521 /*
522 * If the guest has not set up the TSC page or the clock isn't
523 * stable, fall back to get_kvmclock_ns.
524 */
525 if (!hv->tsc_ref.tsc_sequence)
526 return div_u64(get_kvmclock_ns(kvm), 100);
527
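	/*
	 * TSC page formula (see compute_tsc_page_parameters() later in this
	 * file): time in 100ns units = tsc * tsc_scale / 2^64 + tsc_offset.
	 */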
528 vcpu = kvm_get_vcpu(kvm, 0);
529 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
530 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
531 + hv->tsc_ref.tsc_offset;
532}
533
534static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
535 bool vcpu_kick)
536{
537 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
538
539 set_bit(stimer->index,
540 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
541 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
542 if (vcpu_kick)
543 kvm_vcpu_kick(vcpu);
544}
545
546static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
547{
548 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
549
550 trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
551 stimer->index);
552
553 hrtimer_cancel(&stimer->timer);
554 clear_bit(stimer->index,
555 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
556 stimer->msg_pending = false;
557 stimer->exp_time = 0;
558}
559
560static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
561{
562 struct kvm_vcpu_hv_stimer *stimer;
563
564 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
565 trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
566 stimer->index);
567 stimer_mark_pending(stimer, true);
568
569 return HRTIMER_NORESTART;
570}
571
572/*
573 * stimer_start() assumptions:
574 * a) stimer->count is not equal to 0
575 * b) stimer->config has HV_STIMER_ENABLE flag
576 */
577static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
578{
579 u64 time_now;
580 ktime_t ktime_now;
581
582 time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
583 ktime_now = ktime_get();
584
585 if (stimer->config.periodic) {
586 if (stimer->exp_time) {
587 if (time_now >= stimer->exp_time) {
588 u64 remainder;
589
590 div64_u64_rem(time_now - stimer->exp_time,
591 stimer->count, &remainder);
592 stimer->exp_time =
593 time_now + (stimer->count - remainder);
594 }
595 } else
596 stimer->exp_time = time_now + stimer->count;
597
598 trace_kvm_hv_stimer_start_periodic(
599 stimer_to_vcpu(stimer)->vcpu_id,
600 stimer->index,
601 time_now, stimer->exp_time);
602
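		/* exp_time and count are in 100ns units; hrtimer expects ns */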
603 hrtimer_start(&stimer->timer,
604 ktime_add_ns(ktime_now,
605 100 * (stimer->exp_time - time_now)),
606 HRTIMER_MODE_ABS);
607 return 0;
608 }
609 stimer->exp_time = stimer->count;
610 if (time_now >= stimer->count) {
611 /*
612 * Expire timer according to Hypervisor Top-Level Functional
613 * specification v4(15.3.1):
614 * "If a one shot is enabled and the specified count is in
615 * the past, it will expire immediately."
616 */
617 stimer_mark_pending(stimer, false);
618 return 0;
619 }
620
621 trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
622 stimer->index,
623 time_now, stimer->count);
624
625 hrtimer_start(&stimer->timer,
626 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
627 HRTIMER_MODE_ABS);
628 return 0;
629}
630
631static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
632 bool host)
633{
634 union hv_stimer_config new_config = {.as_uint64 = config},
635 old_config = {.as_uint64 = stimer->config.as_uint64};
636
637 trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
638 stimer->index, config, host);
639
640 stimer_cleanup(stimer);
641 if (old_config.enable &&
642 !new_config.direct_mode && new_config.sintx == 0)
643 new_config.enable = 0;
644 stimer->config.as_uint64 = new_config.as_uint64;
645
646 if (stimer->config.enable)
647 stimer_mark_pending(stimer, false);
648
649 return 0;
650}
651
652static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
653 bool host)
654{
655 trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
656 stimer->index, count, host);
657
658 stimer_cleanup(stimer);
659 stimer->count = count;
660 if (stimer->count == 0)
661 stimer->config.enable = 0;
662 else if (stimer->config.auto_enable)
663 stimer->config.enable = 1;
664
665 if (stimer->config.enable)
666 stimer_mark_pending(stimer, false);
667
668 return 0;
669}
670
671static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
672{
673 *pconfig = stimer->config.as_uint64;
674 return 0;
675}
676
677static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
678{
679 *pcount = stimer->count;
680 return 0;
681}
682
683static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
684 struct hv_message *src_msg, bool no_retry)
685{
686 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
687 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
688 gfn_t msg_page_gfn;
689 struct hv_message_header hv_hdr;
690 int r;
691
692 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
693 return -ENOENT;
694
695 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
696
697 /*
698 * Strictly following the spec-mandated ordering would assume setting
699 * .msg_pending before checking .message_type. However, this function
700 * is only called in vcpu context so the entire update is atomic from
701 * guest POV and thus the exact order here doesn't matter.
702 */
703 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
704 msg_off + offsetof(struct hv_message,
705 header.message_type),
706 sizeof(hv_hdr.message_type));
707 if (r < 0)
708 return r;
709
710 if (hv_hdr.message_type != HVMSG_NONE) {
711 if (no_retry)
712 return 0;
713
714 hv_hdr.message_flags.msg_pending = 1;
715 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
716 &hv_hdr.message_flags,
717 msg_off +
718 offsetof(struct hv_message,
719 header.message_flags),
720 sizeof(hv_hdr.message_flags));
721 if (r < 0)
722 return r;
723 return -EAGAIN;
724 }
725
726 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
727 sizeof(src_msg->header) +
728 src_msg->header.payload_size);
729 if (r < 0)
730 return r;
731
732 r = synic_set_irq(synic, sint);
733 if (r < 0)
734 return r;
735 if (r == 0)
736 return -EFAULT;
737 return 0;
738}
739
740static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
741{
742 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
743 struct hv_message *msg = &stimer->msg;
744 struct hv_timer_message_payload *payload =
745 (struct hv_timer_message_payload *)&msg->u.payload;
746
747 /*
748 * To avoid piling up periodic ticks, don't retry message
749 * delivery for them (within "lazy" lost ticks policy).
750 */
751 bool no_retry = stimer->config.periodic;
752
753 payload->expiration_time = stimer->exp_time;
754 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
755 return synic_deliver_msg(vcpu_to_synic(vcpu),
756 stimer->config.sintx, msg,
757 no_retry);
758}
759
760static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
761{
762 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
763 struct kvm_lapic_irq irq = {
764 .delivery_mode = APIC_DM_FIXED,
765 .vector = stimer->config.apic_vector
766 };
767
768 if (lapic_in_kernel(vcpu))
769 return !kvm_apic_set_irq(vcpu, &irq, NULL);
770 return 0;
771}
772
773static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
774{
775 int r, direct = stimer->config.direct_mode;
776
777 stimer->msg_pending = true;
778 if (!direct)
779 r = stimer_send_msg(stimer);
780 else
781 r = stimer_notify_direct(stimer);
782 trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
783 stimer->index, direct, r);
784 if (!r) {
785 stimer->msg_pending = false;
786 if (!(stimer->config.periodic))
787 stimer->config.enable = 0;
788 }
789}
790
791void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
792{
793 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
794 struct kvm_vcpu_hv_stimer *stimer;
795 u64 time_now, exp_time;
796 int i;
797
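	/*
	 * For every timer marked pending: deliver it if it has expired, then
	 * either keep it armed (periodic or still counting down) or clean it
	 * up.
	 */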
798 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
799 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
800 stimer = &hv_vcpu->stimer[i];
801 if (stimer->config.enable) {
802 exp_time = stimer->exp_time;
803
804 if (exp_time) {
805 time_now =
806 get_time_ref_counter(vcpu->kvm);
807 if (time_now >= exp_time)
808 stimer_expiration(stimer);
809 }
810
811 if ((stimer->config.enable) &&
812 stimer->count) {
813 if (!stimer->msg_pending)
814 stimer_start(stimer);
815 } else
816 stimer_cleanup(stimer);
817 }
818 }
819}
820
821void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
822{
823 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
824 int i;
825
826 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
827 stimer_cleanup(&hv_vcpu->stimer[i]);
828}
829
830bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
831{
832 if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
833 return false;
834 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
835}
836EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
837
838bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
839 struct hv_vp_assist_page *assist_page)
840{
841 if (!kvm_hv_assist_page_enabled(vcpu))
842 return false;
843 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
844 assist_page, sizeof(*assist_page));
845}
846EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
847
848static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
849{
850 struct hv_message *msg = &stimer->msg;
851 struct hv_timer_message_payload *payload =
852 (struct hv_timer_message_payload *)&msg->u.payload;
853
854 memset(&msg->header, 0, sizeof(msg->header));
855 msg->header.message_type = HVMSG_TIMER_EXPIRED;
856 msg->header.payload_size = sizeof(*payload);
857
858 payload->timer_index = stimer->index;
859 payload->expiration_time = 0;
860 payload->delivery_time = 0;
861}
862
863static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
864{
865 memset(stimer, 0, sizeof(*stimer));
866 stimer->index = timer_index;
867 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
868 stimer->timer.function = stimer_timer_callback;
869 stimer_prepare_msg(stimer);
870}
871
872void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
873{
874 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
875 int i;
876
877 synic_init(&hv_vcpu->synic);
878
879 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
880 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
881 stimer_init(&hv_vcpu->stimer[i], i);
882}
883
884void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
885{
886 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
887
888 hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
889}
890
891int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
892{
893 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
894
895 /*
896 * Hyper-V SynIC auto-EOI SINTs are
897 * not compatible with APICv, so request
898 * to deactivate APICv permanently.
899 */
900 kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
901 synic->active = true;
902 synic->dont_zero_synic_pages = dont_zero_synic_pages;
903 synic->control = HV_SYNIC_CONTROL_ENABLE;
904 return 0;
905}
906
907static bool kvm_hv_msr_partition_wide(u32 msr)
908{
909 bool r = false;
910
911 switch (msr) {
912 case HV_X64_MSR_GUEST_OS_ID:
913 case HV_X64_MSR_HYPERCALL:
914 case HV_X64_MSR_REFERENCE_TSC:
915 case HV_X64_MSR_TIME_REF_COUNT:
916 case HV_X64_MSR_CRASH_CTL:
917 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
918 case HV_X64_MSR_RESET:
919 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
920 case HV_X64_MSR_TSC_EMULATION_CONTROL:
921 case HV_X64_MSR_TSC_EMULATION_STATUS:
922 case HV_X64_MSR_SYNDBG_OPTIONS:
923 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
924 r = true;
925 break;
926 }
927
928 return r;
929}
930
931static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
932 u32 index, u64 *pdata)
933{
934 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
935 size_t size = ARRAY_SIZE(hv->hv_crash_param);
936
937 if (WARN_ON_ONCE(index >= size))
938 return -EINVAL;
939
940 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
941 return 0;
942}
943
944static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
945{
946 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
947
948 *pdata = hv->hv_crash_ctl;
949 return 0;
950}
951
952static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
953{
954 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
955
956 if (host)
957 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
958
959 if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
960
961 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
962 hv->hv_crash_param[0],
963 hv->hv_crash_param[1],
964 hv->hv_crash_param[2],
965 hv->hv_crash_param[3],
966 hv->hv_crash_param[4]);
967
968 /* Send notification about crash to user space */
969 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
970 }
971
972 return 0;
973}
974
975static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
976 u32 index, u64 data)
977{
978 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
979 size_t size = ARRAY_SIZE(hv->hv_crash_param);
980
981 if (WARN_ON_ONCE(index >= size))
982 return -EINVAL;
983
984 hv->hv_crash_param[array_index_nospec(index, size)] = data;
985 return 0;
986}
987
988/*
989 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
990 * between them is possible:
991 *
992 * kvmclock formula:
993 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
994 * + system_time
995 *
996 * Hyper-V formula:
997 * nsec/100 = ticks * scale / 2^64 + offset
998 *
999 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1000 * By dividing the kvmclock formula by 100 and equating what's left we get:
1001 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1002 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1003 * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
1004 *
1005 * Now expand the kvmclock formula and divide by 100:
1006 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1007 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1008 * + system_time
1009 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1010 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1011 * + system_time / 100
1012 *
1013 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1014 * nsec/100 = ticks * scale / 2^64
1015 * - tsc_timestamp * scale / 2^64
1016 * + system_time / 100
1017 *
1018 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1019 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
1020 *
1021 * These two equivalencies are implemented in this function.
1022 */
1023static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1024 struct ms_hyperv_tsc_page *tsc_ref)
1025{
1026 u64 max_mul;
1027
1028 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1029 return false;
1030
1031 /*
1032 * check if scale would overflow, if so we use the time ref counter
1033 * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
1034 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
1035 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
1036 */
1037 max_mul = 100ull << (32 - hv_clock->tsc_shift);
1038 if (hv_clock->tsc_to_system_mul >= max_mul)
1039 return false;
1040
1041 /*
1042 * Otherwise compute the scale and offset according to the formulas
1043 * derived above.
1044 */
1045 tsc_ref->tsc_scale =
1046 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1047 hv_clock->tsc_to_system_mul,
1048 100);
1049
1050 tsc_ref->tsc_offset = hv_clock->system_time;
1051 do_div(tsc_ref->tsc_offset, 100);
1052 tsc_ref->tsc_offset -=
1053 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
1054 return true;
1055}
1056
1057void kvm_hv_setup_tsc_page(struct kvm *kvm,
1058 struct pvclock_vcpu_time_info *hv_clock)
1059{
1060 struct kvm_hv *hv = &kvm->arch.hyperv;
1061 u32 tsc_seq;
1062 u64 gfn;
1063
1064 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1065 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1066
1067 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1068 return;
1069
1070 mutex_lock(&kvm->arch.hyperv.hv_lock);
1071 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1072 goto out_unlock;
1073
1074 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1075 /*
1076 * Because the TSC parameters only vary when there is a
1077 * change in the master clock, do not bother with caching.
1078 */
1079 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1080 &tsc_seq, sizeof(tsc_seq))))
1081 goto out_unlock;
1082
1083 /*
1084 * While we're computing and writing the parameters, force the
1085 * guest to use the time reference count MSR.
1086 */
1087 hv->tsc_ref.tsc_sequence = 0;
1088 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1089 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1090 goto out_unlock;
1091
1092 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1093 goto out_unlock;
1094
1095 /* Ensure sequence is zero before writing the rest of the struct. */
1096 smp_wmb();
1097 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1098 goto out_unlock;
1099
1100 /*
1101 * Now switch to the TSC page mechanism by writing the sequence.
1102 */
1103 tsc_seq++;
1104 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1105 tsc_seq = 1;
1106
1107 /* Write the struct entirely before the non-zero sequence. */
1108 smp_wmb();
1109
1110 hv->tsc_ref.tsc_sequence = tsc_seq;
1111 kvm_write_guest(kvm, gfn_to_gpa(gfn),
1112 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
1113out_unlock:
1114 mutex_unlock(&kvm->arch.hyperv.hv_lock);
1115}
1116
1117static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1118 bool host)
1119{
1120 struct kvm *kvm = vcpu->kvm;
1121 struct kvm_hv *hv = &kvm->arch.hyperv;
1122
1123 switch (msr) {
1124 case HV_X64_MSR_GUEST_OS_ID:
1125 hv->hv_guest_os_id = data;
1126 /* setting guest os id to zero disables hypercall page */
1127 if (!hv->hv_guest_os_id)
1128 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1129 break;
1130 case HV_X64_MSR_HYPERCALL: {
1131 u64 gfn;
1132 unsigned long addr;
1133 u8 instructions[4];
1134
1135 /* if guest os id is not set hypercall should remain disabled */
1136 if (!hv->hv_guest_os_id)
1137 break;
1138 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1139 hv->hv_hypercall = data;
1140 break;
1141 }
1142 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1143 addr = gfn_to_hva(kvm, gfn);
1144 if (kvm_is_error_hva(addr))
1145 return 1;
1146 kvm_x86_ops.patch_hypercall(vcpu, instructions);
1147 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1148 if (__copy_to_user((void __user *)addr, instructions, 4))
1149 return 1;
1150 hv->hv_hypercall = data;
1151 mark_page_dirty(kvm, gfn);
1152 break;
1153 }
1154 case HV_X64_MSR_REFERENCE_TSC:
1155 hv->hv_tsc_page = data;
1156 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
1157 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1158 break;
1159 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1160 return kvm_hv_msr_set_crash_data(vcpu,
1161 msr - HV_X64_MSR_CRASH_P0,
1162 data);
1163 case HV_X64_MSR_CRASH_CTL:
1164 return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
1165 case HV_X64_MSR_RESET:
1166 if (data == 1) {
1167 vcpu_debug(vcpu, "hyper-v reset requested\n");
1168 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1169 }
1170 break;
1171 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1172 hv->hv_reenlightenment_control = data;
1173 break;
1174 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1175 hv->hv_tsc_emulation_control = data;
1176 break;
1177 case HV_X64_MSR_TSC_EMULATION_STATUS:
1178 hv->hv_tsc_emulation_status = data;
1179 break;
1180 case HV_X64_MSR_TIME_REF_COUNT:
1181 /* read-only, but still ignore it if host-initiated */
1182 if (!host)
1183 return 1;
1184 break;
1185 case HV_X64_MSR_SYNDBG_OPTIONS:
1186 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1187 return syndbg_set_msr(vcpu, msr, data, host);
1188 default:
1189 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1190 msr, data);
1191 return 1;
1192 }
1193 return 0;
1194}
1195
1196/* Calculate cpu time spent by current task in 100ns units */
1197static u64 current_task_runtime_100ns(void)
1198{
1199 u64 utime, stime;
1200
1201 task_cputime_adjusted(current, &utime, &stime);
1202
1203 return div_u64(utime + stime, 100);
1204}
1205
1206static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1207{
1208 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1209
1210 switch (msr) {
1211 case HV_X64_MSR_VP_INDEX: {
1212 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
1213 int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1214 u32 new_vp_index = (u32)data;
1215
1216 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1217 return 1;
1218
1219 if (new_vp_index == hv_vcpu->vp_index)
1220 return 0;
1221
1222 /*
1223 * The VP index is initialized to vcpu_index by
1224 * kvm_hv_vcpu_postcreate so they initially match. Now the
1225 * VP index is changing, adjust num_mismatched_vp_indexes if
1226 * it now matches or no longer matches vcpu_idx.
1227 */
1228 if (hv_vcpu->vp_index == vcpu_idx)
1229 atomic_inc(&hv->num_mismatched_vp_indexes);
1230 else if (new_vp_index == vcpu_idx)
1231 atomic_dec(&hv->num_mismatched_vp_indexes);
1232
1233 hv_vcpu->vp_index = new_vp_index;
1234 break;
1235 }
1236 case HV_X64_MSR_VP_ASSIST_PAGE: {
1237 u64 gfn;
1238 unsigned long addr;
1239
1240 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1241 hv_vcpu->hv_vapic = data;
1242 if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1243 return 1;
1244 break;
1245 }
1246 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1247 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1248 if (kvm_is_error_hva(addr))
1249 return 1;
1250
1251 /*
1252 * Clear apic_assist portion of struct hv_vp_assist_page
1253 * only, there can be valuable data in the rest which needs
1254 * to be preserved e.g. on migration.
1255 */
1256 if (__put_user(0, (u32 __user *)addr))
1257 return 1;
1258 hv_vcpu->hv_vapic = data;
1259 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1260 if (kvm_lapic_enable_pv_eoi(vcpu,
1261 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1262 sizeof(struct hv_vp_assist_page)))
1263 return 1;
1264 break;
1265 }
1266 case HV_X64_MSR_EOI:
1267 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1268 case HV_X64_MSR_ICR:
1269 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1270 case HV_X64_MSR_TPR:
1271 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1272 case HV_X64_MSR_VP_RUNTIME:
1273 if (!host)
1274 return 1;
1275 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1276 break;
1277 case HV_X64_MSR_SCONTROL:
1278 case HV_X64_MSR_SVERSION:
1279 case HV_X64_MSR_SIEFP:
1280 case HV_X64_MSR_SIMP:
1281 case HV_X64_MSR_EOM:
1282 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1283 return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
1284 case HV_X64_MSR_STIMER0_CONFIG:
1285 case HV_X64_MSR_STIMER1_CONFIG:
1286 case HV_X64_MSR_STIMER2_CONFIG:
1287 case HV_X64_MSR_STIMER3_CONFIG: {
1288 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1289
1290 return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
1291 data, host);
1292 }
1293 case HV_X64_MSR_STIMER0_COUNT:
1294 case HV_X64_MSR_STIMER1_COUNT:
1295 case HV_X64_MSR_STIMER2_COUNT:
1296 case HV_X64_MSR_STIMER3_COUNT: {
1297 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1298
1299 return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
1300 data, host);
1301 }
1302 case HV_X64_MSR_TSC_FREQUENCY:
1303 case HV_X64_MSR_APIC_FREQUENCY:
1304 /* read-only, but still ignore it if host-initiated */
1305 if (!host)
1306 return 1;
1307 break;
1308 default:
1309 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1310 msr, data);
1311 return 1;
1312 }
1313
1314 return 0;
1315}
1316
1317static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1318 bool host)
1319{
1320 u64 data = 0;
1321 struct kvm *kvm = vcpu->kvm;
1322 struct kvm_hv *hv = &kvm->arch.hyperv;
1323
1324 switch (msr) {
1325 case HV_X64_MSR_GUEST_OS_ID:
1326 data = hv->hv_guest_os_id;
1327 break;
1328 case HV_X64_MSR_HYPERCALL:
1329 data = hv->hv_hypercall;
1330 break;
1331 case HV_X64_MSR_TIME_REF_COUNT:
1332 data = get_time_ref_counter(kvm);
1333 break;
1334 case HV_X64_MSR_REFERENCE_TSC:
1335 data = hv->hv_tsc_page;
1336 break;
1337 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1338 return kvm_hv_msr_get_crash_data(vcpu,
1339 msr - HV_X64_MSR_CRASH_P0,
1340 pdata);
1341 case HV_X64_MSR_CRASH_CTL:
1342 return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
1343 case HV_X64_MSR_RESET:
1344 data = 0;
1345 break;
1346 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1347 data = hv->hv_reenlightenment_control;
1348 break;
1349 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1350 data = hv->hv_tsc_emulation_control;
1351 break;
1352 case HV_X64_MSR_TSC_EMULATION_STATUS:
1353 data = hv->hv_tsc_emulation_status;
1354 break;
1355 case HV_X64_MSR_SYNDBG_OPTIONS:
1356 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1357 return syndbg_get_msr(vcpu, msr, pdata, host);
1358 default:
1359 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1360 return 1;
1361 }
1362
1363 *pdata = data;
1364 return 0;
1365}
1366
1367static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1368 bool host)
1369{
1370 u64 data = 0;
1371 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1372
1373 switch (msr) {
1374 case HV_X64_MSR_VP_INDEX:
1375 data = hv_vcpu->vp_index;
1376 break;
1377 case HV_X64_MSR_EOI:
1378 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1379 case HV_X64_MSR_ICR:
1380 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1381 case HV_X64_MSR_TPR:
1382 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1383 case HV_X64_MSR_VP_ASSIST_PAGE:
1384 data = hv_vcpu->hv_vapic;
1385 break;
1386 case HV_X64_MSR_VP_RUNTIME:
1387 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1388 break;
1389 case HV_X64_MSR_SCONTROL:
1390 case HV_X64_MSR_SVERSION:
1391 case HV_X64_MSR_SIEFP:
1392 case HV_X64_MSR_SIMP:
1393 case HV_X64_MSR_EOM:
1394 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1395 return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
1396 case HV_X64_MSR_STIMER0_CONFIG:
1397 case HV_X64_MSR_STIMER1_CONFIG:
1398 case HV_X64_MSR_STIMER2_CONFIG:
1399 case HV_X64_MSR_STIMER3_CONFIG: {
1400 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1401
1402 return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
1403 pdata);
1404 }
1405 case HV_X64_MSR_STIMER0_COUNT:
1406 case HV_X64_MSR_STIMER1_COUNT:
1407 case HV_X64_MSR_STIMER2_COUNT:
1408 case HV_X64_MSR_STIMER3_COUNT: {
1409 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1410
1411 return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
1412 pdata);
1413 }
1414 case HV_X64_MSR_TSC_FREQUENCY:
1415 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1416 break;
1417 case HV_X64_MSR_APIC_FREQUENCY:
1418 data = APIC_BUS_FREQUENCY;
1419 break;
1420 default:
1421 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1422 return 1;
1423 }
1424 *pdata = data;
1425 return 0;
1426}
1427
1428int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1429{
1430 if (kvm_hv_msr_partition_wide(msr)) {
1431 int r;
1432
1433 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
1434 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1435 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1436 return r;
1437 } else
1438 return kvm_hv_set_msr(vcpu, msr, data, host);
1439}
1440
1441int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1442{
1443 if (kvm_hv_msr_partition_wide(msr)) {
1444 int r;
1445
1446 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
1447 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1448 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1449 return r;
1450 } else
1451 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1452}
1453
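/*
 * Expand a guest-provided sparse VP set (valid_bank_mask plus 64-bit banks)
 * into a mask of vCPUs.  When every VP index matches its vCPU index the
 * expanded vp_bitmap is returned directly; otherwise it is translated into
 * vcpu_bitmap.
 */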
1454static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1455 struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1456 u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1457{
1458 struct kvm_hv *hv = &kvm->arch.hyperv;
1459 struct kvm_vcpu *vcpu;
1460 int i, bank, sbank = 0;
1461
1462 memset(vp_bitmap, 0,
1463 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
1464 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1465 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1466 vp_bitmap[bank] = sparse_banks[sbank++];
1467
1468 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
1469 /* for all vcpus vp_index == vcpu_idx */
1470 return (unsigned long *)vp_bitmap;
1471 }
1472
1473 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1474 kvm_for_each_vcpu(i, vcpu, kvm) {
1475 if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
1476 (unsigned long *)vp_bitmap))
1477 __set_bit(i, vcpu_bitmap);
1478 }
1479 return vcpu_bitmap;
1480}
1481
1482static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
1483 u16 rep_cnt, bool ex)
1484{
1485 struct kvm *kvm = current_vcpu->kvm;
1486 struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
1487 struct hv_tlb_flush_ex flush_ex;
1488 struct hv_tlb_flush flush;
1489 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1490 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1491 unsigned long *vcpu_mask;
1492 u64 valid_bank_mask;
1493 u64 sparse_banks[64];
1494 int sparse_banks_len;
1495 bool all_cpus;
1496
1497 if (!ex) {
1498 if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
1499 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1500
1501 trace_kvm_hv_flush_tlb(flush.processor_mask,
1502 flush.address_space, flush.flags);
1503
1504 valid_bank_mask = BIT_ULL(0);
1505 sparse_banks[0] = flush.processor_mask;
1506
1507 /*
1508 * Work around possible WS2012 bug: it sends hypercalls
1509 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1510 * while also expecting us to flush something and crashing if
1511 * we don't. Let's treat processor_mask == 0 same as
1512 * HV_FLUSH_ALL_PROCESSORS.
1513 */
1514 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1515 flush.processor_mask == 0;
1516 } else {
1517 if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
1518 sizeof(flush_ex))))
1519 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1520
1521 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1522 flush_ex.hv_vp_set.format,
1523 flush_ex.address_space,
1524 flush_ex.flags);
1525
1526 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1527 all_cpus = flush_ex.hv_vp_set.format !=
1528 HV_GENERIC_SET_SPARSE_4K;
1529
1530 sparse_banks_len =
1531 bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
1532 sizeof(sparse_banks[0]);
1533
1534 if (!sparse_banks_len && !all_cpus)
1535 goto ret_success;
1536
1537 if (!all_cpus &&
1538 kvm_read_guest(kvm,
1539 ingpa + offsetof(struct hv_tlb_flush_ex,
1540 hv_vp_set.bank_contents),
1541 sparse_banks,
1542 sparse_banks_len))
1543 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1544 }
1545
1546 cpumask_clear(&hv_vcpu->tlb_flush);
1547
1548 vcpu_mask = all_cpus ? NULL :
1549 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1550 vp_bitmap, vcpu_bitmap);
1551
1552 /*
1553 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
1554 * analyze it here, flush TLB regardless of the specified address space.
1555 */
1556 kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
1557 NULL, vcpu_mask, &hv_vcpu->tlb_flush);
1558
1559ret_success:
1560 /* We always do full TLB flush, set rep_done = rep_cnt. */
1561 return (u64)HV_STATUS_SUCCESS |
1562 ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1563}
1564
1565static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1566 unsigned long *vcpu_bitmap)
1567{
1568 struct kvm_lapic_irq irq = {
1569 .delivery_mode = APIC_DM_FIXED,
1570 .vector = vector
1571 };
1572 struct kvm_vcpu *vcpu;
1573 int i;
1574
1575 kvm_for_each_vcpu(i, vcpu, kvm) {
1576 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1577 continue;
1578
1579 /* We fail only when APIC is disabled */
1580 kvm_apic_set_irq(vcpu, &irq, NULL);
1581 }
1582}
1583
1584static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
1585 bool ex, bool fast)
1586{
1587 struct kvm *kvm = current_vcpu->kvm;
1588 struct hv_send_ipi_ex send_ipi_ex;
1589 struct hv_send_ipi send_ipi;
1590 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1591 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1592 unsigned long *vcpu_mask;
1593 unsigned long valid_bank_mask;
1594 u64 sparse_banks[64];
1595 int sparse_banks_len;
1596 u32 vector;
1597 bool all_cpus;
1598
1599 if (!ex) {
1600 if (!fast) {
1601 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
1602 sizeof(send_ipi))))
1603 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1604 sparse_banks[0] = send_ipi.cpu_mask;
1605 vector = send_ipi.vector;
1606 } else {
1607 /* 'reserved' part of hv_send_ipi should be 0 */
1608 if (unlikely(ingpa >> 32 != 0))
1609 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1610 sparse_banks[0] = outgpa;
1611 vector = (u32)ingpa;
1612 }
1613 all_cpus = false;
1614 valid_bank_mask = BIT_ULL(0);
1615
1616 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1617 } else {
1618 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
1619 sizeof(send_ipi_ex))))
1620 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1621
1622 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1623 send_ipi_ex.vp_set.format,
1624 send_ipi_ex.vp_set.valid_bank_mask);
1625
1626 vector = send_ipi_ex.vector;
1627 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1628 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1629 sizeof(sparse_banks[0]);
1630
1631 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1632
1633 if (!sparse_banks_len)
1634 goto ret_success;
1635
1636 if (!all_cpus &&
1637 kvm_read_guest(kvm,
1638 ingpa + offsetof(struct hv_send_ipi_ex,
1639 vp_set.bank_contents),
1640 sparse_banks,
1641 sparse_banks_len))
1642 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1643 }
1644
1645 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1646 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1647
1648 vcpu_mask = all_cpus ? NULL :
1649 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1650 vp_bitmap, vcpu_bitmap);
1651
1652 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1653
1654ret_success:
1655 return HV_STATUS_SUCCESS;
1656}
1657
1658bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1659{
1660 return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0;
1661}
1662
1663static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
1664{
1665 bool longmode;
1666
1667 longmode = is_64_bit_mode(vcpu);
1668 if (longmode)
1669 kvm_rax_write(vcpu, result);
1670 else {
1671 kvm_rdx_write(vcpu, result >> 32);
1672 kvm_rax_write(vcpu, result & 0xffffffff);
1673 }
1674}
1675
1676static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
1677{
1678 kvm_hv_hypercall_set_result(vcpu, result);
1679 ++vcpu->stat.hypercalls;
1680 return kvm_skip_emulated_instruction(vcpu);
1681}
1682
1683static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
1684{
1685 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
1686}
1687
1688static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
1689{
1690 struct eventfd_ctx *eventfd;
1691
1692 if (unlikely(!fast)) {
1693 int ret;
1694 gpa_t gpa = param;
1695
1696 if ((gpa & (__alignof__(param) - 1)) ||
1697 offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
1698 return HV_STATUS_INVALID_ALIGNMENT;
1699
1700 ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
1701 if (ret < 0)
1702 return HV_STATUS_INVALID_ALIGNMENT;
1703 }
1704
1705 /*
1706 * Per spec, bits 32-47 contain the extra "flag number". However, we
1707 * have no use for it, and in all known use cases it is zero, so just
1708 * report lookup failure if it isn't.
1709 */
1710 if (param & 0xffff00000000ULL)
1711 return HV_STATUS_INVALID_PORT_ID;
1712 /* remaining bits are reserved-zero */
1713 if (param & ~KVM_HYPERV_CONN_ID_MASK)
1714 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1715
1716 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
1717 rcu_read_lock();
1718 eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
1719 rcu_read_unlock();
1720 if (!eventfd)
1721 return HV_STATUS_INVALID_PORT_ID;
1722
1723 eventfd_signal(eventfd, 1);
1724 return HV_STATUS_SUCCESS;
1725}
1726
1727int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1728{
1729 u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
1730 uint16_t code, rep_idx, rep_cnt;
1731 bool fast, rep;
1732
1733 /*
1734 * Per the Hyper-V spec, a hypercall generates #UD when issued from
1735 * non-zero CPL or from real mode.
1736 */
1737 if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
1738 kvm_queue_exception(vcpu, UD_VECTOR);
1739 return 1;
1740 }
1741
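	/*
	 * Hypercall ABI: in 64-bit mode the input value, input GPA and output
	 * GPA are passed in RCX, RDX and R8; in 32-bit mode each is split
	 * across a register pair (EDX:EAX, EBX:ECX, EDI:ESI).
	 */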
1742#ifdef CONFIG_X86_64
1743 if (is_64_bit_mode(vcpu)) {
1744 param = kvm_rcx_read(vcpu);
1745 ingpa = kvm_rdx_read(vcpu);
1746 outgpa = kvm_r8_read(vcpu);
1747 } else
1748#endif
1749 {
1750 param = ((u64)kvm_rdx_read(vcpu) << 32) |
1751 (kvm_rax_read(vcpu) & 0xffffffff);
1752 ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
1753 (kvm_rcx_read(vcpu) & 0xffffffff);
1754 outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
1755 (kvm_rsi_read(vcpu) & 0xffffffff);
1756 }
1757
1758 code = param & 0xffff;
1759 fast = !!(param & HV_HYPERCALL_FAST_BIT);
1760 rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
1761 rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
1762 rep = !!(rep_cnt || rep_idx);
1763
1764 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
1765
1766 switch (code) {
1767 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
1768 if (unlikely(rep)) {
1769 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1770 break;
1771 }
1772 kvm_vcpu_on_spin(vcpu, true);
1773 break;
1774 case HVCALL_SIGNAL_EVENT:
1775 if (unlikely(rep)) {
1776 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1777 break;
1778 }
1779 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
1780 if (ret != HV_STATUS_INVALID_PORT_ID)
1781 break;
1782 fallthrough; /* maybe userspace knows this conn_id */
1783 case HVCALL_POST_MESSAGE:
1784 /* don't bother userspace if it has no way to handle it */
1785 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
1786 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1787 break;
1788 }
1789 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1790 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1791 vcpu->run->hyperv.u.hcall.input = param;
1792 vcpu->run->hyperv.u.hcall.params[0] = ingpa;
1793 vcpu->run->hyperv.u.hcall.params[1] = outgpa;
1794 vcpu->arch.complete_userspace_io =
1795 kvm_hv_hypercall_complete_userspace;
1796 return 0;
1797 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
1798 if (unlikely(fast || !rep_cnt || rep_idx)) {
1799 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1800 break;
1801 }
1802 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1803 break;
1804 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
1805 if (unlikely(fast || rep)) {
1806 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1807 break;
1808 }
1809 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1810 break;
1811 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
1812 if (unlikely(fast || !rep_cnt || rep_idx)) {
1813 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1814 break;
1815 }
1816 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1817 break;
1818 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
1819 if (unlikely(fast || rep)) {
1820 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1821 break;
1822 }
1823 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1824 break;
1825 case HVCALL_SEND_IPI:
1826 if (unlikely(rep)) {
1827 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1828 break;
1829 }
1830 ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
1831 break;
1832 case HVCALL_SEND_IPI_EX:
1833 if (unlikely(fast || rep)) {
1834 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1835 break;
1836 }
1837 ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
1838 break;
1839 case HVCALL_POST_DEBUG_DATA:
1840 case HVCALL_RETRIEVE_DEBUG_DATA:
1841 if (unlikely(fast)) {
1842 ret = HV_STATUS_INVALID_PARAMETER;
1843 break;
1844 }
1845 fallthrough;
1846 case HVCALL_RESET_DEBUG_SESSION: {
1847 struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
1848
1849 if (!kvm_hv_is_syndbg_enabled(vcpu)) {
1850 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
1851 break;
1852 }
1853
1854 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
1855 ret = HV_STATUS_OPERATION_DENIED;
1856 break;
1857 }
1858 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1859 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1860 vcpu->run->hyperv.u.hcall.input = param;
1861 vcpu->run->hyperv.u.hcall.params[0] = ingpa;
1862 vcpu->run->hyperv.u.hcall.params[1] = outgpa;
1863 vcpu->arch.complete_userspace_io =
1864 kvm_hv_hypercall_complete_userspace;
1865 return 0;
1866 }
1867 default:
1868 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
1869 break;
1870 }
1871
1872 return kvm_hv_hypercall_complete(vcpu, ret);
1873}
1874
1875void kvm_hv_init_vm(struct kvm *kvm)
1876{
1877 mutex_init(&kvm->arch.hyperv.hv_lock);
1878 idr_init(&kvm->arch.hyperv.conn_to_evt);
1879}
1880
1881void kvm_hv_destroy_vm(struct kvm *kvm)
1882{
1883 struct eventfd_ctx *eventfd;
1884 int i;
1885
1886 idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
1887 eventfd_ctx_put(eventfd);
1888 idr_destroy(&kvm->arch.hyperv.conn_to_evt);
1889}
1890
1891static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
1892{
1893 struct kvm_hv *hv = &kvm->arch.hyperv;
1894 struct eventfd_ctx *eventfd;
1895 int ret;
1896
1897 eventfd = eventfd_ctx_fdget(fd);
1898 if (IS_ERR(eventfd))
1899 return PTR_ERR(eventfd);
1900
1901 mutex_lock(&hv->hv_lock);
1902 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
1903 GFP_KERNEL_ACCOUNT);
1904 mutex_unlock(&hv->hv_lock);
1905
1906 if (ret >= 0)
1907 return 0;
1908
1909 if (ret == -ENOSPC)
1910 ret = -EEXIST;
1911 eventfd_ctx_put(eventfd);
1912 return ret;
1913}
1914
1915static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
1916{
1917 struct kvm_hv *hv = &kvm->arch.hyperv;
1918 struct eventfd_ctx *eventfd;
1919
1920 mutex_lock(&hv->hv_lock);
1921 eventfd = idr_remove(&hv->conn_to_evt, conn_id);
1922 mutex_unlock(&hv->hv_lock);
1923
1924 if (!eventfd)
1925 return -ENOENT;
1926
1927 synchronize_srcu(&kvm->srcu);
1928 eventfd_ctx_put(eventfd);
1929 return 0;
1930}
1931
1932int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
1933{
1934 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
1935 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
1936 return -EINVAL;
1937
1938 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
1939 return kvm_hv_eventfd_deassign(kvm, args->conn_id);
1940 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
1941}
1942
int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
				struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	/* Skip NESTED_FEATURES if eVMCS is not supported */
	if (!evmcs_ver)
		--nent;
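	/*
	 * HYPERV_CPUID_NESTED_FEATURES is the last entry in cpuid_entries[],
	 * so dropping it simply shortens the array.
	 */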

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

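			/* eax reports the highest CPUID leaf that KVM implements. */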
			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions so let's use
			 * this version.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
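			/*
			 * eax and ebx advertise the partition privileges the
			 * guest is granted; edx carries additional feature
			 * bits.
			 */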
			ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;

			ent->ebx |= HV_X64_POST_MESSAGES;
			ent->ebx |= HV_X64_SIGNAL_EVENTS;

			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;

			/*
			 * Direct Synthetic timers only make sense with in-kernel
			 * LAPIC
			 */
			if (lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;

			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}