Linux v6.8: drivers/hv/hv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}
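
/*
 * Usage sketch (illustrative, not part of the driver): hv_init() only
 * allocates the per-cpu contexts. Later code reaches a CPU's context
 * with the standard percpu accessors, e.g.:
 *
 *	struct hv_per_cpu_context *hv_cpu;
 *
 *	hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);	// a given CPU
 *	hv_cpu = this_cpu_ptr(hv_context.cpu_context);		// current CPU
 */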

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	unsigned long flags;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	local_irq_save(flags);

	/*
	 * A TDX VM with the paravisor must use the decrypted post_msg_page: see
	 * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
	 * can use the encrypted hyperv_pcpu_input_arg because it copies the
	 * input into the GHCB page, which has been decrypted by the paravisor.
	 */
	if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
		aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
	else
		aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	if (ms_hyperv.paravisor_present) {
		if (hv_isolation_type_tdx())
			status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
						  virt_to_phys(aligned_msg), 0);
		else if (hv_isolation_type_snp())
			status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
						   aligned_msg, NULL,
						   sizeof(*aligned_msg));
		else
			status = HV_STATUS_INVALID_PARAMETER;
	} else {
		status = hv_do_hypercall(HVCALL_POST_MESSAGE,
				aligned_msg, NULL);
	}

	local_irq_restore(flags);

	return hv_result(status);
}
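
/*
 * Caller sketch (hypothetical; the real caller is the VMBus connection
 * code). The connection ID value and the message type 1 below are
 * illustrative assumptions, not definitions from this file.
 */
static int __maybe_unused example_post(void *buffer, size_t buflen)
{
	union hv_connection_id conn_id;

	conn_id.asu32 = 0;
	conn_id.u.id = 1;	/* assumed: a previously negotiated connection ID */

	return hv_post_message(conn_id, 1, buffer, buflen);
}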

int hv_synic_alloc(void)
{
	int cpu, ret = -ENOMEM;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and clean up properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
			hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
			if (hv_cpu->post_msg_page == NULL) {
				pr_err("Unable to allocate post msg page\n");
				goto err;
			}

			ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
			if (ret) {
				pr_err("Failed to decrypt post msg page: %d\n", ret);
				/* Just leak the page, as it's unsafe to free the page. */
				hv_cpu->post_msg_page = NULL;
				goto err;
			}

			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
		}

		/*
		 * SynIC message and event pages are allocated by the
		 * paravisor, so skip allocating them here.
		 */
		if (!ms_hyperv.paravisor_present && !hv_root_partition) {
			hv_cpu->synic_message_page =
				(void *)get_zeroed_page(GFP_ATOMIC);
			if (hv_cpu->synic_message_page == NULL) {
				pr_err("Unable to allocate SYNIC message page\n");
				goto err;
			}

			hv_cpu->synic_event_page =
				(void *)get_zeroed_page(GFP_ATOMIC);
			if (hv_cpu->synic_event_page == NULL) {
				pr_err("Unable to allocate SYNIC event page\n");

				free_page((unsigned long)hv_cpu->synic_message_page);
				hv_cpu->synic_message_page = NULL;
				goto err;
			}
		}

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			ret = set_memory_decrypted((unsigned long)
				hv_cpu->synic_message_page, 1);
			if (ret) {
				pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
				hv_cpu->synic_message_page = NULL;

				/*
				 * Free the event page here so that hv_synic_free()
				 * won't later try to re-encrypt it.
				 */
				free_page((unsigned long)hv_cpu->synic_event_page);
				hv_cpu->synic_event_page = NULL;
				goto err;
			}

			ret = set_memory_decrypted((unsigned long)
				hv_cpu->synic_event_page, 1);
			if (ret) {
				pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
				hv_cpu->synic_event_page = NULL;
				goto err;
			}

			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
		}
	}

	return 0;

err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free().
	 */
	return ret;
}
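
/*
 * The TDX/SNP branches above follow one pattern for pages shared with the
 * hypervisor in a confidential VM; a minimal sketch, assuming the page
 * starts out private (encrypted):
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL);
 *
 *	if (page && set_memory_decrypted((unsigned long)page, 1))
 *		page = NULL;	// encryption state unknown: leak, don't free
 *	if (page)
 *		memset(page, 0, PAGE_SIZE);	// re-zero now that it's shared
 */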

void hv_synic_free(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		/* It's better to leak the page if the encryption fails. */
		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
			if (hv_cpu->post_msg_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->post_msg_page, 1);
				if (ret) {
					pr_err("Failed to encrypt post msg page: %d\n", ret);
					hv_cpu->post_msg_page = NULL;
				}
			}
		}

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			if (hv_cpu->synic_message_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->synic_message_page, 1);
				if (ret) {
					pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
					hv_cpu->synic_message_page = NULL;
				}
			}

			if (hv_cpu->synic_event_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->synic_event_page, 1);
				if (ret) {
					pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
					hv_cpu->synic_event_page = NULL;
				}
			}
		}

		free_page((unsigned long)hv_cpu->post_msg_page);
		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
	}

	kfree(hv_context.hv_numa_map);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., x2v shim), we need
 * to retrieve the initialized message and event pages.  Otherwise, we create
 * and initialize the message and event pages.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	/* Setup the Synic's message page */
	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
	simp.simp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->synic_message_page
			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->synic_message_page)
			pr_err("Fail to map synic message page.\n");
	} else {
		simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
	siefp.siefp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->synic_event_page
			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->synic_event_page)
			pr_err("Fail to map synic event page.\n");
	} else {
		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

	/* Setup the shared SINT. */
	if (vmbus_irq != -1)
		enable_percpu_irq(vmbus_irq, 0);
	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
					VMBUS_MESSAGE_SINT);

	shared_sint.vector = vmbus_interrupt;
	shared_sint.masked = false;

	/*
	 * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
	 * it doesn't provide a recommendation flag and AEOI must be disabled.
	 */
#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
	shared_sint.auto_eoi =
			!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
#else
	shared_sint.auto_eoi = 0;
#endif
	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
				shared_sint.as_uint64);

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
	sctrl.enable = 1;

	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}

int hv_synic_init(unsigned int cpu)
{
	hv_synic_enable_regs(cpu);

	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;

	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
					VMBUS_MESSAGE_SINT);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
				shared_sint.as_uint64);

	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
	/*
	 * In an isolation VM, the sim and sief pages are allocated by
	 * the paravisor. These pages will also be used by the kdump
	 * kernel, so just reset the enable bit here and keep the page
	 * addresses.
	 */
	simp.simp_enabled = 0;
	if (ms_hyperv.paravisor_present || hv_root_partition) {
		iounmap(hv_cpu->synic_message_page);
		hv_cpu->synic_message_page = NULL;
	} else {
		simp.base_simp_gpa = 0;
	}

	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
	siefp.siefp_enabled = 0;

	if (ms_hyperv.paravisor_present || hv_root_partition) {
		iounmap(hv_cpu->synic_event_page);
		hv_cpu->synic_event_page = NULL;
	} else {
		siefp.base_siefp_gpa = 0;
	}

	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
	sctrl.enable = 0;
	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);

	if (vmbus_irq != -1)
		disable_percpu_irq(vmbus_irq);
}

#define HV_MAX_TRIES 3
/*
 * Scan the event flags page of 'this' CPU looking for any bit that is set.  If we find one
 * bit set, then wait for a few milliseconds.  Repeat these steps for a maximum of 3 times.
 * Return 'true', if there is still any set bit after this operation; 'false', otherwise.
 *
 * If a bit is set, that means there is a pending channel interrupt.  The expectation is
 * that the normal interrupt handling mechanism will find and process the channel interrupt
 * "very soon", and in the process clear the bit.
 */
static bool hv_synic_event_pending(void)
{
	struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
	union hv_synic_event_flags *event =
		(union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
	unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
	bool pending;
	u32 relid;
	int tries = 0;

retry:
	pending = false;
	for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
		/* Special case - VMBus channel protocol messages */
		if (relid == 0)
			continue;
		pending = true;
		break;
	}
	if (pending && tries++ < HV_MAX_TRIES) {
		usleep_range(10000, 20000);
		goto retry;
	}
	return pending;
}
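
/*
 * A self-contained illustration (hypothetical, not driver code) of the
 * for_each_set_bit() idiom used above, assuming the usual <linux/bitmap.h>
 * and <linux/bitops.h> semantics:
 */
static void __maybe_unused example_bitmap_scan(void)
{
	DECLARE_BITMAP(flags, 64);
	u32 bit;

	bitmap_zero(flags, 64);
	set_bit(0, flags);	/* like relid 0: protocol messages, skipped */
	set_bit(5, flags);	/* like a pending channel interrupt */

	for_each_set_bit(bit, flags, 64) {
		if (bit == 0)
			continue;
		pr_info("pending relid %u\n", bit);
	}
}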

int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;

	if (vmbus_connection.conn_state != CONNECTED)
		goto always_cleanup;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU)
		return -EBUSY;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup.  In case we find one and vmbus is still connected, we
	 * fail; this will effectively prevent CPU offlining.
	 *
	 * TODO: Re-bind the channels to different CPUs.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found)
		return -EBUSY;

	/*
	 * channel_found == false means that any channels that were previously
	 * assigned to the CPU have been reassigned elsewhere with a call of
	 * vmbus_send_modifychannel().  Scan the event flags page looking for
	 * bits that are set and waiting with a timeout for vmbus_chan_sched()
	 * to process such bits.  If bits are still set after this operation
	 * and VMBus is connected, fail the CPU offlining operation.
	 */
	if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
		return -EBUSY;

always_cleanup:
	hv_stimer_legacy_cleanup(cpu);

	hv_synic_disable_regs(cpu);

	return 0;
}
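
/*
 * Registration sketch (hypothetical; the real registration lives in the
 * VMBus driver core): hv_synic_init() and hv_synic_cleanup() have the
 * shape of CPU hotplug callbacks, so they would be wired up roughly as
 * below, assuming <linux/cpuhotplug.h>. The state name is illustrative.
 */
static int __maybe_unused example_register_hotplug(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				 hv_synic_init, hv_synic_cleanup);
}
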
Linux v4.10.11: drivers/hv/hv.c
 
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
	.synic_initialized	= false,
	.hypercall_page		= NULL,
};

#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1

/*
 * query_hypervisor_info - Get version info of the Windows hypervisor
 */
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;

static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

	/*
	 * It's assumed that this is called after confirming that Viridian
	 * is present. Query id and revision.
	 */
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);
		host_info_eax = eax;
		host_info_ebx = ebx;
		host_info_ecx = ecx;
		host_info_edx = edx;
	}
	return max_leaf;
}

/*
 * hv_do_hypercall - Invoke the specified hypercall
 */
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

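	/*
	 * x86_64 hypercall ABI (per the Hyper-V TLFS): RCX carries the
	 * control code, RDX the input GPA, and R8 the output GPA; the
	 * 64-bit result comes back in RAX, with the status code in its
	 * low 16 bits.
	 */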
	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);

#ifdef CONFIG_X86_64
static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	u64 current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != 0) {
		/*
		 * Use the tsc page to compute the value.
		 */

		while (1) {
			u64 tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc * scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != 0)
				continue;
			/*
			 * Fallback using MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
		.name           = "hyperv_clocksource_tsc_page",
		.rating         = 425,
		.read           = read_hv_clock_tsc,
		.mask           = CLOCKSOURCE_MASK(64),
		.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.post_msg_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.vp_index, 0,
	       sizeof(int) * NR_CPUS);
	memset(hv_context.event_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.msg_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.clk_evt, 0,
	       sizeof(void *) * NR_CPUS);

	max_leaf = query_hypervisor_info();

	/*
	 * Write our OS ID.
	 */
	hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);

	if (!virtaddr)
		goto cleanup;

	hypercall_msr.enable = 1;

	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that hypercall page did get setup. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;
		void *va_tsc;

		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!va_tsc)
			goto cleanup;
		hv_context.tsc_page = va_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
	}
#endif
	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}

/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 */
void hv_cleanup(bool crash)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}

#ifdef CONFIG_X86_64
	/*
	 * Cleanup the TSC page based CS.
	 */
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		/*
		 * Crash can happen in an interrupt context and unregistering
		 * a clocksource is impossible and redundant in this case.
		 */
		if (!oops_in_progress) {
			clocksource_change_rating(&hyperv_cs_tsc, 10);
			clocksource_unregister(&hyperv_cs_tsc);
		}

		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
		if (!crash) {
			vfree(hv_context.tsc_page);
			hv_context.tsc_page = NULL;
		}
	}
#endif
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	aligned_msg = (struct hv_input_post_message *)
			hv_context.post_msg_page[get_cpu()];

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	put_cpu();
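	/* The low 16 bits of the hypercall result carry the Hyper-V status code. */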
	return status & 0xFFFF;
}

static int hv_ce_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	u64 current_tick;

	WARN_ON(!clockevent_state_oneshot(evt));

	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	current_tick += delta;
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
	return 0;
}

static int hv_ce_shutdown(struct clock_event_device *evt)
{
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);

	return 0;
}

static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
	union hv_timer_config timer_cfg;

	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

	return 0;
}

static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
	dev->name = "Hyper-V clockevent";
	dev->features = CLOCK_EVT_FEAT_ONESHOT;
	dev->cpumask = cpumask_of(cpu);
	dev->rating = 1000;
	/*
	 * Avoid setting dev->owner = THIS_MODULE deliberately as doing so will
	 * result in clockevents_config_and_register() taking additional
	 * references to the hv_vmbus module, making it impossible to unload.
	 */

	dev->set_state_shutdown = hv_ce_shutdown;
	dev->set_state_oneshot = hv_ce_set_oneshot;
	dev->set_next_event = hv_ce_set_next_event;
}

int hv_synic_alloc(void)
{
	size_t size = sizeof(struct tasklet_struct);
	size_t ced_size = sizeof(struct clock_event_device);
	int cpu;

	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
					 GFP_ATOMIC);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.event_dpc[cpu] == NULL) {
			pr_err("Unable to allocate event dpc\n");
			goto err;
		}
		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.msg_dpc[cpu] == NULL) {
			pr_err("Unable to allocate msg dpc\n");
			goto err;
		}
		tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);

		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
		if (hv_context.clk_evt[cpu] == NULL) {
			pr_err("Unable to allocate clock event device\n");
			goto err;
		}

		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

		hv_context.synic_message_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_message_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_context.synic_event_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_event_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_context.post_msg_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.post_msg_page[cpu] == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}

		INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
	}

	return 0;
err:
	return -ENOMEM;
}

static void hv_synic_free_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.msg_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}

void hv_synic_free(void)
{
	int cpu;

	kfree(hv_context.hv_numa_map);
	for_each_present_cpu(cpu)
		hv_synic_free_cpu(cpu);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., x2v shim), we need
 * to retrieve the initialized message and event pages.  Otherwise, we create
 * and initialize the message and event pages.
 */
void hv_synic_init(void *arg)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;
	u64 vp_index;

	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	/* Setup the Synic's message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Setup the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	shared_sint.auto_eoi = true;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;

	/*
	 * Setup the mapping between Hyper-V's notion
	 * of cpuid and Linux' notion of cpuid.
	 * This array will be indexed using Linux cpuid.
	 */
	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
	hv_context.vp_index[cpu] = (u32)vp_index;

	/*
	 * Register the per-cpu clockevent source.
	 */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_context.clk_evt[cpu],
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
	return;
}
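
/*
 * Invocation sketch (hypothetical; the real call sites are in the VMBus
 * driver core): hv_synic_init() matches the smp_call_func_t signature,
 * so it is intended to run on every CPU, e.g.:
 */
static void __maybe_unused example_synic_init_all_cpus(void)
{
	/* Run hv_synic_init() on each online CPU and wait for completion. */
	on_each_cpu(hv_synic_init, NULL, 1);
}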

/*
 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
 */
void hv_synic_clockevents_cleanup(void)
{
	int cpu;

	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
		return;

	for_each_present_cpu(cpu)
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
		hv_ce_shutdown(hv_context.clk_evt[cpu]);
	}

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 0;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
}