// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

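	/*
	 * Hypercall input must be 8-byte aligned and must not cross a page
	 * boundary, so the message is staged in the dedicated per-cpu
	 * post_msg_page rather than passed through directly.
	 */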
	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	/* Preemption must remain disabled until after the hypercall
	 * so some other thread can't get scheduled onto this cpu and
	 * corrupt the per-cpu post_msg_page
	 */
	put_cpu_ptr(hv_cpu);

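	/* The low 16 bits of the status are the Hyper-V result code. */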
	return status & 0xFFFF;
}

int hv_synic_alloc(void)
{
	int cpu;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and cleanup properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

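		/* Per-CPU tasklet that drains this CPU's SynIC message page */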
		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		hv_cpu->synic_message_page =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_message_page == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_event_page == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->post_msg_page == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}

		INIT_LIST_HEAD(&hv_cpu->chan_list);
	}

	return 0;
err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free()
	 */
	return -ENOMEM;
}

void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

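		/*
		 * free_page() ignores a zero address, so pages that were
		 * never allocated by hv_synic_alloc() are skipped safely.
		 */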
		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
		free_page((unsigned long)hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages.  Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

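	/*
	 * The SIMP and SIEFP registers take the guest page frame number of
	 * the backing page, hence the PAGE_SHIFT on the physical addresses
	 * below.
	 */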
	/* Setup the Synic's message page */
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> PAGE_SHIFT;

	hv_set_simp(simp.as_uint64);

	/* Setup the Synic's event page */
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> PAGE_SHIFT;

	hv_set_siefp(siefp.as_uint64);

	/* Setup the shared SINT. */
	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
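	/*
	 * Honor the hypervisor's recommendation to avoid Auto EOI, e.g. when
	 * it would get in the way of hardware-accelerated interrupt handling.
	 */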
	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		shared_sint.auto_eoi = false;
	else
		shared_sint.auto_eoi = true;

	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 1;

	hv_set_synic_state(sctrl.as_uint64);
}

int hv_synic_init(unsigned int cpu)
{
	hv_synic_enable_regs(cpu);

	hv_stimer_init(cpu);

	return 0;
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;

	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	hv_set_simp(simp.as_uint64);

	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	hv_set_siefp(siefp.as_uint64);

	/* Disable the global synic bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 0;
	hv_set_synic_state(sctrl.as_uint64);
}

int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;
	unsigned long flags;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * clean up. If we find one and vmbus is still connected, we need to
	 * fail; this effectively prevents CPU offlining. There is no way we
	 * can re-bind channels to different CPUs for now.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		spin_unlock_irqrestore(&channel->lock, flags);
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	hv_stimer_cleanup(cpu);

	hv_synic_disable_regs(cpu);

	return 0;
}