v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2009, Microsoft Corporation.
  4 *
  5 * Authors:
  6 *   Haiyang Zhang <haiyangz@microsoft.com>
  7 *   Hank Janssen  <hjanssen@microsoft.com>
  8 */
  9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 10
 11#include <linux/kernel.h>
 12#include <linux/mm.h>
 13#include <linux/slab.h>
 14#include <linux/vmalloc.h>
 15#include <linux/hyperv.h>
 16#include <linux/version.h>
 17#include <linux/random.h>
 18#include <linux/clockchips.h>
 19#include <clocksource/hyperv_timer.h>
 20#include <asm/mshyperv.h>
 21#include "hyperv_vmbus.h"
 22
 23/* The one and only */
 24struct hv_context hv_context;
 25
 26/*
 27 * hv_init - Main initialization routine.
 28 *
 29 * This routine must be called before any other routines in here are called
 30 */
 31int hv_init(void)
 32{
 33	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
 34	if (!hv_context.cpu_context)
 35		return -ENOMEM;
 36	return 0;
 37}
 38
 39/*
 40 * hv_post_message - Post a message using the hypervisor message IPC.
 41 *
 42 * This involves a hypercall.
 43 */
 44int hv_post_message(union hv_connection_id connection_id,
 45		  enum hv_message_type message_type,
 46		  void *payload, size_t payload_size)
 47{
 48	struct hv_input_post_message *aligned_msg;
 49	struct hv_per_cpu_context *hv_cpu;
 50	u64 status;
 51
 52	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
 53		return -EMSGSIZE;
 54
 55	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
 56	aligned_msg = hv_cpu->post_msg_page;
 57	aligned_msg->connectionid = connection_id;
 58	aligned_msg->reserved = 0;
 59	aligned_msg->message_type = message_type;
 60	aligned_msg->payload_size = payload_size;
 61	memcpy((void *)aligned_msg->payload, payload, payload_size);
 62
 63	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 64
 65	/* Preemption must remain disabled until after the hypercall
 66	 * so some other thread can't get scheduled onto this cpu and
 67	 * corrupt the per-cpu post_msg_page
 68	 */
 69	put_cpu_ptr(hv_cpu);
 70
 71	return status & 0xFFFF;
 72}
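
The value returned here is the Hyper-V status code carried in the low 16 bits of the hypercall result, not a Linux errno; HV_STATUS_SUCCESS is 0. A minimal caller sketch under that assumption (the wrapper name and the -EAGAIN mapping are illustrative, not taken from this file):

/* illustrative caller; the wrapper name is hypothetical */
static int example_post(union hv_connection_id conn_id,
			enum hv_message_type type, void *buf, size_t len)
{
	int status = hv_post_message(conn_id, type, buf, len);

	/* 0 == HV_STATUS_SUCCESS; anything else is a Hyper-V status code */
	return (status == HV_STATUS_SUCCESS) ? 0 : -EAGAIN;
}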
 73
 74int hv_synic_alloc(void)
 75{
 76	int cpu;
 77	struct hv_per_cpu_context *hv_cpu;
 78
 79	/*
 80	 * First, zero all per-cpu memory areas so hv_synic_free() can
 81	 * detect what memory has been allocated and cleanup properly
 82	 * after any failures.
 83	 */
 84	for_each_present_cpu(cpu) {
 85		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
 86		memset(hv_cpu, 0, sizeof(*hv_cpu));
 87	}
 88
 89	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
 90					 GFP_KERNEL);
 91	if (hv_context.hv_numa_map == NULL) {
 92		pr_err("Unable to allocate NUMA map\n");
 93		goto err;
 94	}
 95
 96	for_each_present_cpu(cpu) {
 97		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
 98
 99		tasklet_init(&hv_cpu->msg_dpc,
100			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
101
102		hv_cpu->synic_message_page =
103			(void *)get_zeroed_page(GFP_ATOMIC);
104		if (hv_cpu->synic_message_page == NULL) {
105			pr_err("Unable to allocate SYNIC message page\n");
106			goto err;
107		}
108
109		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
110		if (hv_cpu->synic_event_page == NULL) {
111			pr_err("Unable to allocate SYNIC event page\n");
112			goto err;
113		}
114
115		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
116		if (hv_cpu->post_msg_page == NULL) {
117			pr_err("Unable to allocate post msg page\n");
118			goto err;
119		}
120
121		INIT_LIST_HEAD(&hv_cpu->chan_list);
122	}
123
124	return 0;
125err:
126	/*
127	 * Any memory allocations that succeeded will be freed when
128	 * the caller cleans up by calling hv_synic_free()
129	 */
130	return -ENOMEM;
131}
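
On failure, hv_synic_alloc() deliberately returns without unwinding: the up-front memset of every per-cpu context lets hv_synic_free() tell which pages were actually allocated. A sketch of the expected call pattern (the caller name is hypothetical):

/* illustrative caller; the function name is hypothetical */
static int example_bus_init(void)
{
	int ret = hv_synic_alloc();

	if (ret) {
		/* safe even after a partial failure: per-cpu state was zeroed first */
		hv_synic_free();
		return ret;
	}
	return 0;
}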
132
133
134void hv_synic_free(void)
135{
136	int cpu;
137
138	for_each_present_cpu(cpu) {
139		struct hv_per_cpu_context *hv_cpu
140			= per_cpu_ptr(hv_context.cpu_context, cpu);
141
142		free_page((unsigned long)hv_cpu->synic_event_page);
143		free_page((unsigned long)hv_cpu->synic_message_page);
144		free_page((unsigned long)hv_cpu->post_msg_page);
145	}
146
147	kfree(hv_context.hv_numa_map);
148}
149
150/*
151 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
152 *
153 * If it is already initialized by another entity (i.e., an x2v shim), we need to

154 * retrieve the initialized message and event pages.  Otherwise, we create and
155 * initialize the message and event pages.
156 */
157void hv_synic_enable_regs(unsigned int cpu)
158{
159	struct hv_per_cpu_context *hv_cpu
160		= per_cpu_ptr(hv_context.cpu_context, cpu);
161	union hv_synic_simp simp;
162	union hv_synic_siefp siefp;
163	union hv_synic_sint shared_sint;
164	union hv_synic_scontrol sctrl;
165
166	/* Setup the Synic's message page */
167	hv_get_simp(simp.as_uint64);
168	simp.simp_enabled = 1;
169	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
170		>> PAGE_SHIFT;
171
172	hv_set_simp(simp.as_uint64);
173
174	/* Setup the Synic's event page */
175	hv_get_siefp(siefp.as_uint64);
176	siefp.siefp_enabled = 1;
177	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
178		>> PAGE_SHIFT;
179
180	hv_set_siefp(siefp.as_uint64);
181
182	/* Setup the shared SINT. */
183	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
184
185	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
186	shared_sint.masked = false;
187	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
188		shared_sint.auto_eoi = false;
189	else
190		shared_sint.auto_eoi = true;
191
192	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
193
194	/* Enable the global synic bit */
195	hv_get_synic_state(sctrl.as_uint64);
196	sctrl.enable = 1;
197
198	hv_set_synic_state(sctrl.as_uint64);
199}
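
The SIMP and SIEFP registers take a guest page frame number rather than a full address, which is why the physical address of each per-cpu page is shifted right by PAGE_SHIFT before being written back; assuming 4 KiB pages, for example, a message page at physical address 0x1234000 is programmed as frame 0x1234.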
200
201int hv_synic_init(unsigned int cpu)
202{
203	hv_synic_enable_regs(cpu);
204
205	hv_stimer_init(cpu);
206
207	return 0;
208}
209
210/*
211 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
212 */
213void hv_synic_disable_regs(unsigned int cpu)
214{
215	union hv_synic_sint shared_sint;
216	union hv_synic_simp simp;
217	union hv_synic_siefp siefp;
218	union hv_synic_scontrol sctrl;
219
220	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
221
222	shared_sint.masked = 1;
223
224	/* Need to correctly cleanup in the case of SMP!!! */
225	/* Disable the interrupt */
226	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
227
228	hv_get_simp(simp.as_uint64);
229	simp.simp_enabled = 0;
230	simp.base_simp_gpa = 0;
231
232	hv_set_simp(simp.as_uint64);
233
234	hv_get_siefp(siefp.as_uint64);
235	siefp.siefp_enabled = 0;
236	siefp.base_siefp_gpa = 0;
237
238	hv_set_siefp(siefp.as_uint64);
239
240	/* Disable the global synic bit */
241	hv_get_synic_state(sctrl.as_uint64);
242	sctrl.enable = 0;
243	hv_set_synic_state(sctrl.as_uint64);
244}
245
246int hv_synic_cleanup(unsigned int cpu)
247{
248	struct vmbus_channel *channel, *sc;
249	bool channel_found = false;
250	unsigned long flags;
251
252	/*
253	 * Search for channels which are bound to the CPU we're about to
254	 * cleanup. In case we find one and vmbus is still connected we need to
255	 * fail, this will effectively prevent CPU offlining. There is no way
256	 * we can re-bind channels to different CPUs for now.
257	 */
258	mutex_lock(&vmbus_connection.channel_mutex);
259	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
260		if (channel->target_cpu == cpu) {
261			channel_found = true;
262			break;
263		}
264		spin_lock_irqsave(&channel->lock, flags);
265		list_for_each_entry(sc, &channel->sc_list, sc_list) {
266			if (sc->target_cpu == cpu) {
267				channel_found = true;
268				break;
269			}
270		}
271		spin_unlock_irqrestore(&channel->lock, flags);
272		if (channel_found)
273			break;
274	}
275	mutex_unlock(&vmbus_connection.channel_mutex);
276
277	if (channel_found && vmbus_connection.conn_state == CONNECTED)
278		return -EBUSY;
279
280	hv_stimer_cleanup(cpu);
281
282	hv_synic_disable_regs(cpu);
283
284	return 0;
285}
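
hv_synic_init() and hv_synic_cleanup() have the int (*)(unsigned int cpu) shape used by CPU hotplug online/offline callbacks. A sketch of how such hooks are typically registered (the state name string is illustrative; the exact call made by the VMBus driver may differ):

/* illustrative registration; the function name is hypothetical */
static int example_register_hotplug(void)
{
	/* CPUHP_AP_ONLINE_DYN returns a dynamically allocated state (>= 0) */
	int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					    "hyperv/vmbus:online",
					    hv_synic_init, hv_synic_cleanup);

	return ret < 0 ? ret : 0;
}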
v4.17
  1/*
  2 * Copyright (c) 2009, Microsoft Corporation.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms and conditions of the GNU General Public License,
  6 * version 2, as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
 16 *
 17 * Authors:
 18 *   Haiyang Zhang <haiyangz@microsoft.com>
 19 *   Hank Janssen  <hjanssen@microsoft.com>
 20 *
 21 */
 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 23
 24#include <linux/kernel.h>
 25#include <linux/mm.h>
 26#include <linux/slab.h>
 27#include <linux/vmalloc.h>
 28#include <linux/hyperv.h>
 29#include <linux/version.h>
 30#include <linux/random.h>
 31#include <linux/clockchips.h>
 32#include <asm/mshyperv.h>
 33#include "hyperv_vmbus.h"
 34
 35/* The one and only */
 36struct hv_context hv_context = {
 37	.synic_initialized	= false,
 38};
 39
 40/*
 41 * If false, we're using the old mechanism for stimer0 interrupts
 42 * where it sends a VMbus message when it expires. The old
 43 * mechanism is used when running on older versions of Hyper-V
 44 * that don't support Direct Mode. While Hyper-V provides
 45 * four stimers per CPU, Linux uses only stimer0.
 46 */
 47static bool direct_mode_enabled;
 48static int stimer0_irq;
 49static int stimer0_vector;
 50
 51#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
 52#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
 53#define HV_MIN_DELTA_TICKS 1
 54
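
These constants follow from the synthetic timer counting in 100 ns units: HV_TIMER_FREQUENCY is 10,000,000 ticks per second, so a 1 ms timeout, for example, corresponds to a delta of 10,000 ticks, and the maximum programmable delta of 0xffffffff ticks is roughly 429 seconds.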
 55/*
 56 * hv_init - Main initialization routine.
 57 *
 58 * This routine must be called before any other routines in here are called
 59 */
 60int hv_init(void)
 61{
 62	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
 63	if (!hv_context.cpu_context)
 64		return -ENOMEM;
 65
 66	direct_mode_enabled = ms_hyperv.misc_features &
 67			HV_X64_STIMER_DIRECT_MODE_AVAILABLE;
 68	return 0;
 69}
 70
 71/*
 72 * hv_post_message - Post a message using the hypervisor message IPC.
 73 *
 74 * This involves a hypercall.
 75 */
 76int hv_post_message(union hv_connection_id connection_id,
 77		  enum hv_message_type message_type,
 78		  void *payload, size_t payload_size)
 79{
 80	struct hv_input_post_message *aligned_msg;
 81	struct hv_per_cpu_context *hv_cpu;
 82	u64 status;
 83
 84	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
 85		return -EMSGSIZE;
 86
 87	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
 88	aligned_msg = hv_cpu->post_msg_page;
 89	aligned_msg->connectionid = connection_id;
 90	aligned_msg->reserved = 0;
 91	aligned_msg->message_type = message_type;
 92	aligned_msg->payload_size = payload_size;
 93	memcpy((void *)aligned_msg->payload, payload, payload_size);
 94
 95	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 96
 97	/* Preemption must remain disabled until after the hypercall
 98	 * so some other thread can't get scheduled onto this cpu and
 99	 * corrupt the per-cpu post_msg_page
100	 */
101	put_cpu_ptr(hv_cpu);
102
103	return status & 0xFFFF;
104}
105
106/*
107 * ISR for when stimer0 is operating in Direct Mode.  Direct Mode
108 * does not use VMbus or any VMbus messages, so process here and not
109 * in the VMbus driver code.
110 */
111
112static void hv_stimer0_isr(void)
113{
114	struct hv_per_cpu_context *hv_cpu;
115
116	hv_cpu = this_cpu_ptr(hv_context.cpu_context);
117	hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
118	add_interrupt_randomness(stimer0_vector, 0);
119}
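
Because a Direct Mode stimer0 interrupt arrives on a dedicated vector rather than through a request_irq() handler, the generic IRQ entry code never sees it; the ISR therefore dispatches the per-cpu clockevent handler itself and feeds the event into the interrupt entropy pool explicitly via add_interrupt_randomness().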
120
121static int hv_ce_set_next_event(unsigned long delta,
122				struct clock_event_device *evt)
123{
124	u64 current_tick;
125
126	WARN_ON(!clockevent_state_oneshot(evt));
127
128	current_tick = hyperv_cs->read(NULL);
129	current_tick += delta;
130	hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
131	return 0;
132}
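
Note that the stimer is programmed with an absolute deadline: hyperv_cs is the Hyper-V reference-counter clocksource, so current_tick is the current time in 100 ns units, and the value written to HV_X64_MSR_STIMER0_COUNT is that reading plus the requested delta.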
133
134static int hv_ce_shutdown(struct clock_event_device *evt)
135{
136	hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
137	hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
138	if (direct_mode_enabled)
139		hv_disable_stimer0_percpu_irq(stimer0_irq);
140
141	return 0;
142}
143
144static int hv_ce_set_oneshot(struct clock_event_device *evt)
145{
146	union hv_timer_config timer_cfg;
147
148	timer_cfg.as_uint64 = 0;
149	timer_cfg.enable = 1;
150	timer_cfg.auto_enable = 1;
151	if (direct_mode_enabled) {
152		/*
153		 * When it expires, the timer will directly interrupt
154		 * on the specified hardware vector/IRQ.
155		 */
156		timer_cfg.direct_mode = 1;
157		timer_cfg.apic_vector = stimer0_vector;
158		hv_enable_stimer0_percpu_irq(stimer0_irq);
159	} else {
160		/*
161		 * When it expires, the timer will generate a VMbus message,
162		 * to be handled by the normal VMbus interrupt handler.
163		 */
164		timer_cfg.direct_mode = 0;
165		timer_cfg.sintx = VMBUS_MESSAGE_SINT;
166	}
167	hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
168	return 0;
169}
170
171static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
172{
173	dev->name = "Hyper-V clockevent";
174	dev->features = CLOCK_EVT_FEAT_ONESHOT;
175	dev->cpumask = cpumask_of(cpu);
176	dev->rating = 1000;
177	/*
178	 * Deliberately avoid setting dev->owner = THIS_MODULE, as doing so would
179	 * result in clockevents_config_and_register() taking additional
180	 * references to the hv_vmbus module, making it impossible to unload.
181	 */
182
183	dev->set_state_shutdown = hv_ce_shutdown;
184	dev->set_state_oneshot = hv_ce_set_oneshot;
185	dev->set_next_event = hv_ce_set_next_event;
186}
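
The rating of 1000 matters here: the clockevents core selects the highest-rated per-cpu tick device, so once this device is registered it is preferred over lower-rated alternatives such as the local APIC timer.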
187
188
189int hv_synic_alloc(void)
190{
191	int cpu;
192
193	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
194					 GFP_KERNEL);
195	if (hv_context.hv_numa_map == NULL) {
196		pr_err("Unable to allocate NUMA map\n");
197		goto err;
198	}
199
200	for_each_present_cpu(cpu) {
201		struct hv_per_cpu_context *hv_cpu
202			= per_cpu_ptr(hv_context.cpu_context, cpu);
203
204		memset(hv_cpu, 0, sizeof(*hv_cpu));
205		tasklet_init(&hv_cpu->msg_dpc,
206			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
207
208		hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
209					  GFP_KERNEL);
210		if (hv_cpu->clk_evt == NULL) {
211			pr_err("Unable to allocate clock event device\n");
212			goto err;
213		}
214		hv_init_clockevent_device(hv_cpu->clk_evt, cpu);
215
216		hv_cpu->synic_message_page =
217			(void *)get_zeroed_page(GFP_ATOMIC);
218		if (hv_cpu->synic_message_page == NULL) {
219			pr_err("Unable to allocate SYNIC message page\n");
220			goto err;
221		}
222
223		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
224		if (hv_cpu->synic_event_page == NULL) {
225			pr_err("Unable to allocate SYNIC event page\n");
226			goto err;
227		}
228
229		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
230		if (hv_cpu->post_msg_page == NULL) {
231			pr_err("Unable to allocate post msg page\n");
232			goto err;
233		}
234
235		INIT_LIST_HEAD(&hv_cpu->chan_list);
236	}
237
238	if (direct_mode_enabled &&
239	    hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
240				hv_stimer0_isr))
241		goto err;
242
243	return 0;
244err:
245	return -ENOMEM;
246}
247
248
249void hv_synic_free(void)
250{
251	int cpu;
252
253	for_each_present_cpu(cpu) {
254		struct hv_per_cpu_context *hv_cpu
255			= per_cpu_ptr(hv_context.cpu_context, cpu);
256
257		if (hv_cpu->synic_event_page)
258			free_page((unsigned long)hv_cpu->synic_event_page);
259		if (hv_cpu->synic_message_page)
260			free_page((unsigned long)hv_cpu->synic_message_page);
261		if (hv_cpu->post_msg_page)
262			free_page((unsigned long)hv_cpu->post_msg_page);
263	}
264
265	kfree(hv_context.hv_numa_map);
266}
267
268/*
269 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
270 *
271 * If it is already initialized by another entity (i.e., an x2v shim), we need to
272 * retrieve the initialized message and event pages.  Otherwise, we create and
273 * initialize the message and event pages.
274 */
275int hv_synic_init(unsigned int cpu)
276{
277	struct hv_per_cpu_context *hv_cpu
278		= per_cpu_ptr(hv_context.cpu_context, cpu);
279	union hv_synic_simp simp;
280	union hv_synic_siefp siefp;
281	union hv_synic_sint shared_sint;
282	union hv_synic_scontrol sctrl;
283
284	/* Setup the Synic's message page */
285	hv_get_simp(simp.as_uint64);
286	simp.simp_enabled = 1;
287	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
288		>> PAGE_SHIFT;
289
290	hv_set_simp(simp.as_uint64);
291
292	/* Setup the Synic's event page */
293	hv_get_siefp(siefp.as_uint64);
294	siefp.siefp_enabled = 1;
295	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
296		>> PAGE_SHIFT;
297
298	hv_set_siefp(siefp.as_uint64);
299
300	/* Setup the shared SINT. */
301	hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
302			    shared_sint.as_uint64);
303
304	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
305	shared_sint.masked = false;
306	if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
307		shared_sint.auto_eoi = false;
308	else
309		shared_sint.auto_eoi = true;
310
311	hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
312			    shared_sint.as_uint64);
313
314	/* Enable the global synic bit */
315	hv_get_synic_state(sctrl.as_uint64);
316	sctrl.enable = 1;
317
318	hv_set_synic_state(sctrl.as_uint64);
319
320	hv_context.synic_initialized = true;
321
322	/*
323	 * Register the per-cpu clockevent source.
324	 */
325	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
326		clockevents_config_and_register(hv_cpu->clk_evt,
327						HV_TIMER_FREQUENCY,
328						HV_MIN_DELTA_TICKS,
329						HV_MAX_MAX_DELTA_TICKS);
330	return 0;
331}
332
333/*
334 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
335 */
336void hv_synic_clockevents_cleanup(void)
337{
338	int cpu;
339
340	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
341		return;
342
343	if (direct_mode_enabled)
344		hv_remove_stimer0_irq(stimer0_irq);
345
346	for_each_present_cpu(cpu) {
347		struct hv_per_cpu_context *hv_cpu
348			= per_cpu_ptr(hv_context.cpu_context, cpu);
349
350		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
351	}
352}
353
354/*
355 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
356 */
357int hv_synic_cleanup(unsigned int cpu)
358{
359	union hv_synic_sint shared_sint;
360	union hv_synic_simp simp;
361	union hv_synic_siefp siefp;
362	union hv_synic_scontrol sctrl;
363	struct vmbus_channel *channel, *sc;
364	bool channel_found = false;
365	unsigned long flags;
366
367	if (!hv_context.synic_initialized)
368		return -EFAULT;
369
370	/*
371	 * Search for channels which are bound to the CPU we're about to
372	 * cleanup. In case we find one and vmbus is still connected we need to
373	 * fail, this will effectively prevent CPU offlining. There is no way
374	 * we can re-bind channels to different CPUs for now.
375	 */
376	mutex_lock(&vmbus_connection.channel_mutex);
377	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
378		if (channel->target_cpu == cpu) {
379			channel_found = true;
380			break;
381		}
382		spin_lock_irqsave(&channel->lock, flags);
383		list_for_each_entry(sc, &channel->sc_list, sc_list) {
384			if (sc->target_cpu == cpu) {
385				channel_found = true;
386				break;
387			}
388		}
389		spin_unlock_irqrestore(&channel->lock, flags);
390		if (channel_found)
391			break;
392	}
393	mutex_unlock(&vmbus_connection.channel_mutex);
394
395	if (channel_found && vmbus_connection.conn_state == CONNECTED)
396		return -EBUSY;
397
398	/* Turn off clockevent device */
399	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
400		struct hv_per_cpu_context *hv_cpu
401			= this_cpu_ptr(hv_context.cpu_context);
402
403		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
404		hv_ce_shutdown(hv_cpu->clk_evt);
405		put_cpu_ptr(hv_cpu);
406	}
407
408	hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
409			    shared_sint.as_uint64);
410
411	shared_sint.masked = 1;
412
413	/* Need to correctly cleanup in the case of SMP!!! */
414	/* Disable the interrupt */
415	hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
416			    shared_sint.as_uint64);
417
418	hv_get_simp(simp.as_uint64);
419	simp.simp_enabled = 0;
420	simp.base_simp_gpa = 0;
421
422	hv_set_simp(simp.as_uint64);
423
424	hv_get_siefp(siefp.as_uint64);
425	siefp.siefp_enabled = 0;
426	siefp.base_siefp_gpa = 0;
427
428	hv_set_siefp(siefp.as_uint64);
429
430	/* Disable the global synic bit */
431	hv_get_synic_state(sctrl.as_uint64);
432	sctrl.enable = 0;
433	hv_set_synic_state(sctrl.as_uint64);
434
435	return 0;
436}