v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/smp.h>
  3#include <linux/cpu.h>
  4#include <linux/slab.h>
  5#include <linux/cpumask.h>
  6#include <linux/percpu.h>
  7
  8#include <xen/events.h>
  9
 10#include <xen/hvc-console.h>
 11#include "xen-ops.h"
 12#include "smp.h"
 13
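/*
 * Per-CPU bookkeeping for the Xen IPI event channels.  Each entry pairs the
 * Linux irq number returned by the bind_*_to_irqhandler() call with the
 * kasprintf()'d name passed to it; .irq = -1 means "not bound yet", which is
 * what xen_smp_intr_free() checks before unbinding.
 */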
 14static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
 15static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
 16static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
 17static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
 18
 19static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 20static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 21
 22/*
 23 * Reschedule call back.
 24 */
 25static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 26{
 27	inc_irq_stat(irq_resched_count);
 28	scheduler_ipi();
 29
 30	return IRQ_HANDLED;
 31}
 32
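/*
 * Undo whatever xen_smp_intr_init() below managed to set up for @cpu: free
 * the allocated names and unbind every event channel that actually got bound
 * (irq >= 0), resetting each slot to -1.  Since every field is checked
 * individually, this is also safe to call on a partially initialised CPU,
 * which is exactly what the error path of xen_smp_intr_init() relies on.
 */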
 33void xen_smp_intr_free(unsigned int cpu)
 34{
 35	kfree(per_cpu(xen_resched_irq, cpu).name);
 36	per_cpu(xen_resched_irq, cpu).name = NULL;
 37	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
 38		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
 39		per_cpu(xen_resched_irq, cpu).irq = -1;
 40	}
 41	kfree(per_cpu(xen_callfunc_irq, cpu).name);
 42	per_cpu(xen_callfunc_irq, cpu).name = NULL;
 43	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
 44		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
 45		per_cpu(xen_callfunc_irq, cpu).irq = -1;
 46	}
 47	kfree(per_cpu(xen_debug_irq, cpu).name);
 48	per_cpu(xen_debug_irq, cpu).name = NULL;
 49	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
 50		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
 51		per_cpu(xen_debug_irq, cpu).irq = -1;
 52	}
 53	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
 54	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
 55	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
 56		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
 57				       NULL);
 58		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
 59	}
 60}
 61
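/*
 * Bind the per-CPU IPI handlers (and, when not using FIFO event channels,
 * the VIRQ_DEBUG handler) for @cpu.  The pattern for each source is the
 * same: allocate a name, record it, bind the vector/VIRQ to its handler with
 * IRQF_PERCPU | IRQF_NOBALANCING, and store the resulting irq.  Any failure
 * drops into xen_smp_intr_free(), which unwinds whatever was bound so far.
 *
 * Sketch of the intended caller pattern (the actual callers live in the
 * PV/HVM-specific SMP code, which is not part of this listing):
 *
 *	rc = xen_smp_intr_init(cpu);
 *	if (rc)
 *		return rc;
 */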
 62int xen_smp_intr_init(unsigned int cpu)
 63{
 64	int rc;
 65	char *resched_name, *callfunc_name, *debug_name;
 66
 67	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
 68	if (!resched_name)
 69		goto fail_mem;
 70	per_cpu(xen_resched_irq, cpu).name = resched_name;
 71	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
 72				    cpu,
 73				    xen_reschedule_interrupt,
 74				    IRQF_PERCPU|IRQF_NOBALANCING,
 75				    resched_name,
 76				    NULL);
 77	if (rc < 0)
 78		goto fail;
 79	per_cpu(xen_resched_irq, cpu).irq = rc;
 80
 81	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
 82	if (!callfunc_name)
 83		goto fail_mem;
 84	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
 85	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
 86				    cpu,
 87				    xen_call_function_interrupt,
 88				    IRQF_PERCPU|IRQF_NOBALANCING,
 89				    callfunc_name,
 90				    NULL);
 91	if (rc < 0)
 92		goto fail;
 93	per_cpu(xen_callfunc_irq, cpu).irq = rc;
 94
 95	if (!xen_fifo_events) {
 96		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 97		if (!debug_name)
 98			goto fail_mem;
 99
100		per_cpu(xen_debug_irq, cpu).name = debug_name;
101		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
102					     xen_debug_interrupt,
103					     IRQF_PERCPU | IRQF_NOBALANCING,
104					     debug_name, NULL);
105		if (rc < 0)
106			goto fail;
107		per_cpu(xen_debug_irq, cpu).irq = rc;
108	}
109
110	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
111	if (!callfunc_name)
112		goto fail_mem;
113
114	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
115	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
116				    cpu,
117				    xen_call_function_single_interrupt,
118				    IRQF_PERCPU|IRQF_NOBALANCING,
119				    callfunc_name,
120				    NULL);
121	if (rc < 0)
122		goto fail;
123	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
124
125	return 0;
126
127 fail_mem:
128	rc = -ENOMEM;
129 fail:
130	xen_smp_intr_free(cpu);
131	return rc;
132}
133
134void __init xen_smp_cpus_done(unsigned int max_cpus)
135{
136	if (xen_hvm_domain())
137		native_smp_cpus_done(max_cpus);
138	else
139		calculate_max_logical_packages();
140}
141
142void xen_smp_send_reschedule(int cpu)
143{
144	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
145}
146
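/*
 * Deliver @vector to every online CPU in @mask, one event channel
 * notification per target: the mask is simply walked with
 * xen_send_IPI_one().
 */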
147static void __xen_send_IPI_mask(const struct cpumask *mask,
148			      int vector)
149{
150	unsigned cpu;
151
152	for_each_cpu_and(cpu, mask, cpu_online_mask)
153		xen_send_IPI_one(cpu, vector);
154}
155
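/*
 * Send the call-function IPI to @mask.  If any target vCPU currently has
 * its time stolen by the hypervisor (xen_vcpu_stolen()), yield this vCPU
 * once so the recipients get a chance to run and service the request.
 */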
156void xen_smp_send_call_function_ipi(const struct cpumask *mask)
157{
158	int cpu;
159
160	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
161
162	/* Make sure other vcpus get a chance to run if they need to. */
163	for_each_cpu(cpu, mask) {
164		if (xen_vcpu_stolen(cpu)) {
165			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
166			break;
167		}
168	}
169}
170
171void xen_smp_send_call_function_single_ipi(int cpu)
172{
173	__xen_send_IPI_mask(cpumask_of(cpu),
174			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
175}
176
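/*
 * Map a native x86 IPI vector number onto Xen's per-CPU IPI enumeration.
 * Unsupported vectors log an error and return -1, so callers only need to
 * check for a negative result; for example, xen_send_IPI_all(CALL_FUNCTION_VECTOR)
 * below ends up delivering XEN_CALL_FUNCTION_VECTOR to every online CPU.
 */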
177static inline int xen_map_vector(int vector)
178{
179	int xen_vector;
180
181	switch (vector) {
182	case RESCHEDULE_VECTOR:
183		xen_vector = XEN_RESCHEDULE_VECTOR;
184		break;
185	case CALL_FUNCTION_VECTOR:
186		xen_vector = XEN_CALL_FUNCTION_VECTOR;
187		break;
188	case CALL_FUNCTION_SINGLE_VECTOR:
189		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
190		break;
191	case IRQ_WORK_VECTOR:
192		xen_vector = XEN_IRQ_WORK_VECTOR;
193		break;
194#ifdef CONFIG_X86_64
195	case NMI_VECTOR:
196	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
197		xen_vector = XEN_NMI_VECTOR;
198		break;
199#endif
200	default:
201		xen_vector = -1;
202		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
203			vector);
204	}
205
206	return xen_vector;
207}
208
209void xen_send_IPI_mask(const struct cpumask *mask,
210			      int vector)
211{
212	int xen_vector = xen_map_vector(vector);
213
214	if (xen_vector >= 0)
215		__xen_send_IPI_mask(mask, xen_vector);
216}
217
218void xen_send_IPI_all(int vector)
219{
220	int xen_vector = xen_map_vector(vector);
221
222	if (xen_vector >= 0)
223		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
224}
225
226void xen_send_IPI_self(int vector)
227{
228	int xen_vector = xen_map_vector(vector);
229
230	if (xen_vector >= 0)
231		xen_send_IPI_one(smp_processor_id(), xen_vector);
232}
233
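/*
 * Like xen_send_IPI_mask(), but never signals the sending CPU itself, and
 * bails out early when there is nothing useful to do (only one CPU online,
 * or an unmapped vector).
 */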
234void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
235				int vector)
236{
237	unsigned cpu;
238	unsigned int this_cpu = smp_processor_id();
239	int xen_vector = xen_map_vector(vector);
240
241	if (!(num_online_cpus() > 1) || (xen_vector < 0))
242		return;
243
244	for_each_cpu_and(cpu, mask, cpu_online_mask) {
245		if (this_cpu == cpu)
246			continue;
247
248		xen_send_IPI_one(cpu, xen_vector);
249	}
250}
251
252void xen_send_IPI_allbutself(int vector)
253{
254	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
255}
256
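/*
 * The handlers bound in xen_smp_intr_init() above: both simply forward to
 * the generic smp_call_function machinery and bump the per-CPU function-call
 * interrupt statistic (irq_call_count).
 */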
257static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
258{
259	generic_smp_call_function_interrupt();
260	inc_irq_stat(irq_call_count);
261
262	return IRQ_HANDLED;
263}
264
265static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
266{
267	generic_smp_call_function_single_interrupt();
268	inc_irq_stat(irq_call_count);
269
270	return IRQ_HANDLED;
271}
v3.1
  1/*
  2 * Xen SMP support
  3 *
  4 * This file implements the Xen versions of smp_ops.  SMP under Xen is
  5 * very straightforward.  Bringing a CPU up is simply a matter of
  6 * loading its initial context and setting it running.
  7 *
  8 * IPIs are handled through the Xen event mechanism.
  9 *
 10 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 11 * useful topology information for the kernel to make use of.  As a
 12 * result, all CPUs are treated as if they're single-core and
 13 * single-threaded.
 14 */
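/*
 * Concretely (see cpu_initialize_context() and xen_cpu_up() below): the
 * boot CPU fills in a struct vcpu_guest_context for the new vCPU -- entry
 * point, segment registers, GDT frame, kernel stack and cr3 -- registers it
 * with the hypervisor via VCPUOP_initialise, and then starts the vCPU with
 * VCPUOP_up.  The new vCPU begins executing at cpu_bringup_and_idle().
 */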
 15#include <linux/sched.h>
 16#include <linux/err.h>
 17#include <linux/slab.h>
 18#include <linux/smp.h>
 19
 20#include <asm/paravirt.h>
 21#include <asm/desc.h>
 22#include <asm/pgtable.h>
 23#include <asm/cpu.h>
 24
 25#include <xen/interface/xen.h>
 26#include <xen/interface/vcpu.h>
 27
 28#include <asm/xen/interface.h>
 29#include <asm/xen/hypercall.h>
 30
 31#include <xen/xen.h>
 32#include <xen/page.h>
 33#include <xen/events.h>
 34
 35#include <xen/hvc-console.h>
 36#include "xen-ops.h"
 37#include "mmu.h"
 38
 39cpumask_var_t xen_cpu_initialized_map;
 40
 41static DEFINE_PER_CPU(int, xen_resched_irq);
 42static DEFINE_PER_CPU(int, xen_callfunc_irq);
 43static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
 44static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 45
 46static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 47static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 48
 49/*
 50 * Reschedule call back.
 51 */
 52static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 53{
 54	inc_irq_stat(irq_resched_count);
 55	scheduler_ipi();
 56
 57	return IRQ_HANDLED;
 58}
 59
 60static void __cpuinit cpu_bringup(void)
 61{
 62	int cpu = smp_processor_id();
 63
 64	cpu_init();
 65	touch_softlockup_watchdog();
 66	preempt_disable();
 67
 68	xen_enable_sysenter();
 69	xen_enable_syscall();
 70
 71	cpu = smp_processor_id();
 72	smp_store_cpu_info(cpu);
 73	cpu_data(cpu).x86_max_cores = 1;
 74	set_cpu_sibling_map(cpu);
 75
 76	xen_setup_cpu_clockevents();
 77
 78	set_cpu_online(cpu, true);
 79	percpu_write(cpu_state, CPU_ONLINE);
 80	wmb();
 81
 82	/* We can take interrupts now: we're officially "up". */
 83	local_irq_enable();
 84
 85	wmb();			/* make sure everything is out */
 86}
 87
 88static void __cpuinit cpu_bringup_and_idle(void)
 89{
 90	cpu_bringup();
 91	cpu_idle();
 92}
 93
 94static int xen_smp_intr_init(unsigned int cpu)
 95{
 96	int rc;
 97	const char *resched_name, *callfunc_name, *debug_name;
 98
 99	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
100	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
101				    cpu,
102				    xen_reschedule_interrupt,
103				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
104				    resched_name,
105				    NULL);
106	if (rc < 0)
107		goto fail;
108	per_cpu(xen_resched_irq, cpu) = rc;
109
110	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
111	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
112				    cpu,
113				    xen_call_function_interrupt,
114				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
115				    callfunc_name,
116				    NULL);
117	if (rc < 0)
118		goto fail;
119	per_cpu(xen_callfunc_irq, cpu) = rc;
120
121	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
122	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
123				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
124				     debug_name, NULL);
125	if (rc < 0)
126		goto fail;
127	per_cpu(xen_debug_irq, cpu) = rc;
128
129	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
130	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
131				    cpu,
132				    xen_call_function_single_interrupt,
133				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
134				    callfunc_name,
135				    NULL);
136	if (rc < 0)
137		goto fail;
138	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
139
140	return 0;
141
142 fail:
143	if (per_cpu(xen_resched_irq, cpu) >= 0)
144		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
145	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
146		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
147	if (per_cpu(xen_debug_irq, cpu) >= 0)
148		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
149	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
150		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
151				       NULL);
152
153	return rc;
154}
155
156static void __init xen_fill_possible_map(void)
157{
158	int i, rc;
159
160	if (xen_initial_domain())
161		return;
162
163	for (i = 0; i < nr_cpu_ids; i++) {
164		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
165		if (rc >= 0) {
166			num_processors++;
167			set_cpu_possible(i, true);
168		}
169	}
170}
171
172static void __init xen_filter_cpu_maps(void)
173{
174	int i, rc;
175
176	if (!xen_initial_domain())
177		return;
178
179	num_processors = 0;
180	disabled_cpus = 0;
181	for (i = 0; i < nr_cpu_ids; i++) {
182		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
183		if (rc >= 0) {
184			num_processors++;
185			set_cpu_possible(i, true);
186		} else {
187			set_cpu_possible(i, false);
188			set_cpu_present(i, false);
189		}
190	}
191}
192
193static void __init xen_smp_prepare_boot_cpu(void)
194{
195	BUG_ON(smp_processor_id() != 0);
196	native_smp_prepare_boot_cpu();
197
198	/* We've switched to the "real" per-cpu gdt, so make sure the
199	   old memory can be recycled */
200	make_lowmem_page_readwrite(xen_initial_gdt);
201
202	xen_filter_cpu_maps();
203	xen_setup_vcpu_info_placement();
204}
205
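/*
 * PV smp_prepare_cpus: refuse to continue if IO-APIC setup was disabled
 * (the nosmp/noapic parameters do not work under Xen, as the panic message
 * says), set up the boot CPU's sibling maps and IPIs, trim cpu_possible_mask
 * down to max_cpus, and fork an idle task for every remaining secondary CPU
 * so it can be brought up later.
 */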
206static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
207{
208	unsigned cpu;
209	unsigned int i;
210
211	if (skip_ioapic_setup) {
212		char *m = (max_cpus == 0) ?
213			"The nosmp parameter is incompatible with Xen; " \
214			"use Xen dom0_max_vcpus=1 parameter" :
215			"The noapic parameter is incompatible with Xen";
216
217		xen_raw_printk(m);
218		panic(m);
219	}
220	xen_init_lock_cpu(0);
221
222	smp_store_cpu_info(0);
223	cpu_data(0).x86_max_cores = 1;
224
225	for_each_possible_cpu(i) {
226		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
227		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
228		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
229	}
230	set_cpu_sibling_map(0);
231
232	if (xen_smp_intr_init(0))
233		BUG();
234
235	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
236		panic("could not allocate xen_cpu_initialized_map\n");
237
238	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
239
240	/* Restrict the possible_map according to max_cpus. */
241	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
242		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
243			continue;
244		set_cpu_possible(cpu, false);
245	}
246
247	for_each_possible_cpu (cpu) {
248		struct task_struct *idle;
249
250		if (cpu == 0)
251			continue;
252
253		idle = fork_idle(cpu);
254		if (IS_ERR(idle))
255			panic("failed fork for CPU %d", cpu);
256
257		set_cpu_present(cpu, true);
258	}
259}
260
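/*
 * Build the initial register and descriptor state for @cpu and register it
 * with the hypervisor via VCPUOP_initialise.  The GDT page is made read-only
 * before its machine frame is handed to Xen, and the entry point is set to
 * cpu_bringup_and_idle().  A vCPU already recorded in
 * xen_cpu_initialized_map is left alone.
 */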
261static int __cpuinit
262cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
263{
264	struct vcpu_guest_context *ctxt;
265	struct desc_struct *gdt;
266	unsigned long gdt_mfn;
267
268	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
269		return 0;
270
271	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
272	if (ctxt == NULL)
273		return -ENOMEM;
274
275	gdt = get_cpu_gdt_table(cpu);
276
277	ctxt->flags = VGCF_IN_KERNEL;
278	ctxt->user_regs.ds = __USER_DS;
279	ctxt->user_regs.es = __USER_DS;
280	ctxt->user_regs.ss = __KERNEL_DS;
281#ifdef CONFIG_X86_32
282	ctxt->user_regs.fs = __KERNEL_PERCPU;
283	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
284#else
285	ctxt->gs_base_kernel = per_cpu_offset(cpu);
286#endif
287	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
288	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
289
290	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
291
292	xen_copy_trap_info(ctxt->trap_ctxt);
293
294	ctxt->ldt_ents = 0;
295
296	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
297
298	gdt_mfn = arbitrary_virt_to_mfn(gdt);
299	make_lowmem_page_readonly(gdt);
300	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
301
302	ctxt->gdt_frames[0] = gdt_mfn;
303	ctxt->gdt_ents      = GDT_ENTRIES;
304
305	ctxt->user_regs.cs = __KERNEL_CS;
306	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
307
308	ctxt->kernel_ss = __KERNEL_DS;
309	ctxt->kernel_sp = idle->thread.sp0;
310
311#ifdef CONFIG_X86_32
312	ctxt->event_callback_cs     = __KERNEL_CS;
313	ctxt->failsafe_callback_cs  = __KERNEL_CS;
314#endif
315	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
316	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
317
318	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
319	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
320
321	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
322		BUG();
323
324	kfree(ctxt);
325	return 0;
326}
327
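/*
 * PV cpu_up: point current_task (and, on 64-bit, the kernel stack) at the
 * idle task, set up runstate, timer and spinlock state, start with event
 * delivery masked, initialise the vCPU context, bind its IPIs, then issue
 * VCPUOP_up and yield until cpu_bringup() on the new CPU flips cpu_state to
 * CPU_ONLINE.
 */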
328static int __cpuinit xen_cpu_up(unsigned int cpu)
329{
330	struct task_struct *idle = idle_task(cpu);
331	int rc;
332
333	per_cpu(current_task, cpu) = idle;
334#ifdef CONFIG_X86_32
335	irq_ctx_init(cpu);
336#else
337	clear_tsk_thread_flag(idle, TIF_FORK);
338	per_cpu(kernel_stack, cpu) =
339		(unsigned long)task_stack_page(idle) -
340		KERNEL_STACK_OFFSET + THREAD_SIZE;
341#endif
342	xen_setup_runstate_info(cpu);
343	xen_setup_timer(cpu);
344	xen_init_lock_cpu(cpu);
345
346	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
347
348	/* make sure interrupts start blocked */
349	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
350
351	rc = cpu_initialize_context(cpu, idle);
352	if (rc)
353		return rc;
354
355	if (num_online_cpus() == 1)
356		alternatives_smp_switch(1);
357
358	rc = xen_smp_intr_init(cpu);
359	if (rc)
360		return rc;
361
362	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
363	BUG_ON(rc);
364
365	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
366		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
367		barrier();
368	}
369
370	return 0;
371}
372
373static void xen_smp_cpus_done(unsigned int max_cpus)
374{
375}
376
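/*
 * CPU hotplug (CONFIG_HOTPLUG_CPU): xen_cpu_die() waits for the vCPU to
 * actually go down, then unbinds its IPIs and tears down its lock and timer
 * state; xen_play_dead() takes the dying vCPU offline with VCPUOP_down and
 * calls cpu_bringup() again when the CPU is later brought back up.  Without
 * hotplug support these hooks are stubbed out below.
 */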
377#ifdef CONFIG_HOTPLUG_CPU
378static int xen_cpu_disable(void)
379{
380	unsigned int cpu = smp_processor_id();
381	if (cpu == 0)
382		return -EBUSY;
383
384	cpu_disable_common();
385
386	load_cr3(swapper_pg_dir);
387	return 0;
388}
389
390static void xen_cpu_die(unsigned int cpu)
391{
392	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
393		current->state = TASK_UNINTERRUPTIBLE;
394		schedule_timeout(HZ/10);
395	}
396	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
397	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
398	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
399	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
400	xen_uninit_lock_cpu(cpu);
401	xen_teardown_timer(cpu);
402
403	if (num_online_cpus() == 1)
404		alternatives_smp_switch(0);
405}
406
407static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
408{
409	play_dead_common();
410	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
411	cpu_bringup();
412}
413
414#else /* !CONFIG_HOTPLUG_CPU */
415static int xen_cpu_disable(void)
416{
417	return -ENOSYS;
418}
419
420static void xen_cpu_die(unsigned int cpu)
421{
422	BUG();
423}
424
425static void xen_play_dead(void)
426{
427	BUG();
428}
429
430#endif
431static void stop_self(void *v)
432{
433	int cpu = smp_processor_id();
434
435	/* make sure we're not pinning something down */
436	load_cr3(swapper_pg_dir);
437	/* should set up a minimal gdt */
438
439	set_cpu_online(cpu, false);
440
441	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
442	BUG();
443}
444
445static void xen_stop_other_cpus(int wait)
446{
447	smp_call_function(stop_self, NULL, wait);
448}
449
450static void xen_smp_send_reschedule(int cpu)
451{
452	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
453}
454
455static void xen_send_IPI_mask(const struct cpumask *mask,
456			      enum ipi_vector vector)
457{
458	unsigned cpu;
459
460	for_each_cpu_and(cpu, mask, cpu_online_mask)
461		xen_send_IPI_one(cpu, vector);
462}
463
464static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
465{
466	int cpu;
467
468	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
469
470	/* Make sure other vcpus get a chance to run if they need to. */
471	for_each_cpu(cpu, mask) {
472		if (xen_vcpu_stolen(cpu)) {
473			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
474			break;
475		}
476	}
477}
478
479static void xen_smp_send_call_function_single_ipi(int cpu)
480{
481	xen_send_IPI_mask(cpumask_of(cpu),
482			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
483}
484
485static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
486{
487	irq_enter();
488	generic_smp_call_function_interrupt();
489	inc_irq_stat(irq_call_count);
490	irq_exit();
491
492	return IRQ_HANDLED;
493}
494
495static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
496{
497	irq_enter();
498	generic_smp_call_function_single_interrupt();
499	inc_irq_stat(irq_call_count);
500	irq_exit();
501
502	return IRQ_HANDLED;
503}
504
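/*
 * For PV guests xen_smp_init() installs the full xen_smp_ops table below in
 * place of the native smp_ops; for HVM guests with a working vector
 * callback, xen_hvm_smp_init() only overrides the hooks that must go through
 * Xen event channels and keeps the rest of the native SMP path.
 */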
505static const struct smp_ops xen_smp_ops __initconst = {
506	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
507	.smp_prepare_cpus = xen_smp_prepare_cpus,
508	.smp_cpus_done = xen_smp_cpus_done,
509
510	.cpu_up = xen_cpu_up,
511	.cpu_die = xen_cpu_die,
512	.cpu_disable = xen_cpu_disable,
513	.play_dead = xen_play_dead,
514
515	.stop_other_cpus = xen_stop_other_cpus,
516	.smp_send_reschedule = xen_smp_send_reschedule,
517
518	.send_call_func_ipi = xen_smp_send_call_function_ipi,
519	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
520};
521
522void __init xen_smp_init(void)
523{
524	smp_ops = xen_smp_ops;
525	xen_fill_possible_map();
526	xen_init_spinlocks();
527}
528
529static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
530{
531	native_smp_prepare_cpus(max_cpus);
532	WARN_ON(xen_smp_intr_init(0));
533
534	xen_init_lock_cpu(0);
535}
536
537static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
538{
539	int rc;
540	rc = native_cpu_up(cpu);
541	WARN_ON (xen_smp_intr_init(cpu));
542	return rc;
543}
544
545static void xen_hvm_cpu_die(unsigned int cpu)
546{
547	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
548	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
549	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
550	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
551	native_cpu_die(cpu);
552}
553
554void __init xen_hvm_smp_init(void)
555{
556	if (!xen_have_vector_callback)
557		return;
558	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
559	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
560	smp_ops.cpu_up = xen_hvm_cpu_up;
561	smp_ops.cpu_die = xen_hvm_cpu_die;
562	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
563	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
564}