/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP.  see PII A1AP
 *   E2AP.  see PII A2AP
 *   E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP.  see PPro 1AP
 *   A2AP.  see PPro 2AP
 *   A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of the 1AP-9AP errata are visible to the normal user,
 *   except for the occasional delivery of a 'spurious interrupt' as
 *   trap #15. This is very rare and a non-problem.
 *
 *   1AP.   Linux maps the APIC as non-cacheable
 *   2AP.   worked around in hardware
 *   3AP.   fixed in C0 and above steppings by microcode update.
 *          Linux does not use excessive STARTUP_IPIs.
 *   4AP.   worked around in hardware
 *   5AP.   symmetric IO mode (normal Linux operation) not affected.
 *          'noapic' mode has vector 0xf filled out properly.
 *   6AP.   'noapic' mode might be affected - fixed in later steppings
 *   7AP.   We do not assume that writes to the LVT deassert IRQs
 *   8AP.   We do not enable low power mode (deep sleep) during MP bootup
 *   9AP.   We do not use mixed mode
 *
 * Pentium
 *   There is a marginal case where REP MOVS on 100MHz SMP
 *   machines with B stepping processors can fail. XXX should provide
 *   an L1cache=Writethrough or L1cache=off option.
 *
 *   B stepping CPUs may hang. There are hardware workarounds
 *   for this. We warn about it in case your board doesn't have the
 *   workarounds. Basically that's so I can tell anyone with a B stepping
 *   CPU and SMP problems "tough".
 *
 *   Specific items [from the Pentium Processor Specification Update]
 *
 *   1AP.   Linux doesn't use remote read
 *   2AP.   Linux doesn't trust APIC errors
 *   3AP.   We work around this
 *   4AP.   Linux never generates 3 interrupts of the same priority
 *          to cause a lost local interrupt.
 *   5AP.   Remote read is never used
 *   6AP.   not affected - worked around in hardware
 *   7AP.   not affected - worked around in hardware
 *   8AP.   worked around in hardware - we get explicit CS errors if not
 *   9AP.   only 'noapic' mode affected. Might generate spurious
 *          interrupts, we log only the first one and count the
 *          rest silently.
 *   10AP.  not affected - worked around in hardware
 *   11AP.  Linux reads the APIC between writes to avoid this, as per
 *          the documentation. Make sure you preserve this as it affects
 *          the C stepping chips too.
 *   12AP.  not affected - worked around in hardware
 *   13AP.  not affected - worked around in hardware
 *   14AP.  we always deassert INIT during bootup
 *   15AP.  not affected - worked around in hardware
 *   16AP.  not affected - worked around in hardware
 *   17AP.  not affected - worked around in hardware
 *   18AP.  not affected - worked around in hardware
 *   19AP.  not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me: these bugs are either ___RARE___
 * or are signal timing bugs worked around in hardware, and there is
 * nothing of note from the C stepping onwards.
 */

static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
        if (unlikely(cpu_is_offline(cpu))) {
                WARN_ON(1);
                return;
        }
        apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}
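
/*
 * For reference, a sketch of how callers reach the routine above: the
 * generic smp_send_reschedule() wrapper (which lives in <asm/smp.h>,
 * not in this file) dispatches through the smp_ops structure defined
 * at the bottom of this file, roughly:
 *
 *      static inline void smp_send_reschedule(int cpu)
 *      {
 *              smp_ops.smp_send_reschedule(cpu);
 *      }
 */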

void native_send_call_func_single_ipi(int cpu)
{
        apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
        cpumask_var_t allbutself;

        if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
                apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
                return;
        }

        cpumask_copy(allbutself, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), allbutself);

        if (cpumask_equal(mask, allbutself) &&
            cpumask_equal(cpu_online_mask, cpu_callout_mask))
                apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        free_cpumask_var(allbutself);
}
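
/*
 * Illustrative sketch (not part of the original file): drivers normally
 * trigger the IPIs above through the generic cross-call API declared in
 * <linux/smp.h>. The example_* helpers below are hypothetical; only the
 * smp_call_function*() calls are real kernel API.
 */
static void example_poke_cpu(void *info)
{
        /* Runs on the target CPU, in IPI (interrupt) context. */
        pr_info("poked CPU %d\n", smp_processor_id());
}

static void __maybe_unused example_poke_everyone(int cpu)
{
        /* One remote CPU: ends up in native_send_call_func_single_ipi(). */
        smp_call_function_single(cpu, example_poke_cpu, NULL, 1);

        /* All other online CPUs: ends up in native_send_call_func_ipi(). */
        smp_call_function(example_poke_cpu, NULL, 1);
}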

static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
        /* We are registered on the stopping cpu too, avoid spurious NMI */
        if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
                return NMI_HANDLED;

        stop_this_cpu(NULL);

        return NMI_HANDLED;
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */

asmlinkage __visible void smp_reboot_interrupt(void)
{
        ipi_entering_ack_irq();
        stop_this_cpu(NULL);
        irq_exit();
}

static void native_stop_other_cpus(int wait)
{
        unsigned long flags;
        unsigned long timeout;

        if (reboot_force)
                return;

        /*
         * Use our own vector here because smp_call_function
         * does lots of things not suitable in a panic situation.
         */

        /*
         * We start by using the REBOOT_VECTOR irq.
         * The irq is treated as a sync point to allow critical
         * regions of code on other cpus to release their spin locks
         * and re-enable irqs. Jumping straight to an NMI might
         * accidentally cause deadlocks with further shutdown/panic
         * code. By syncing, we give the cpus up to one second to
         * finish their work before we force them off with the NMI.
         */
        if (num_online_cpus() > 1) {
                /* did someone beat us here? */
                if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
                        return;

                /* sync above data before sending IRQ */
                wmb();

                apic->send_IPI_allbutself(REBOOT_VECTOR);

                /*
                 * Don't wait longer than a second if the caller
                 * didn't ask us to wait.
                 */
                timeout = USEC_PER_SEC;
                while (num_online_cpus() > 1 && (wait || timeout--))
                        udelay(1);
        }

        /* if the REBOOT_VECTOR didn't work, try with the NMI */
        if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
                if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
                                         NMI_FLAG_FIRST, "smp_stop"))
                        /* Note: we ignore failures here */
                        /* Hope the REBOOT_VECTOR is good enough */
                        goto finish;

                /* sync above data before sending IRQ */
                wmb();

                pr_emerg("Shutting down cpus with NMI\n");

                apic->send_IPI_allbutself(NMI_VECTOR);

                /*
                 * Don't wait longer than 10 ms if the caller
                 * didn't ask us to wait.
                 */
                timeout = USEC_PER_MSEC * 10;
                while (num_online_cpus() > 1 && (wait || timeout--))
                        udelay(1);
        }

finish:
        local_irq_save(flags);
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
        local_irq_restore(flags);
}

/*
 * Reschedule callback.
 */
static inline void __smp_reschedule_interrupt(void)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();
}

__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
        ack_APIC_irq();
        __smp_reschedule_interrupt();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
}

__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
        /*
         * Need to call irq_enter() before calling the trace point.
         * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
         * scheduler_ipi()). This is OK, since those functions are allowed
         * to nest.
         */
        ipi_entering_ack_irq();
        trace_reschedule_entry(RESCHEDULE_VECTOR);
        __smp_reschedule_interrupt();
        trace_reschedule_exit(RESCHEDULE_VECTOR);
        exiting_irq();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
}

static inline void __smp_call_function_interrupt(void)
{
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
}

__visible void smp_call_function_interrupt(struct pt_regs *regs)
{
        ipi_entering_ack_irq();
        __smp_call_function_interrupt();
        exiting_irq();
}

__visible void smp_trace_call_function_interrupt(struct pt_regs *regs)
{
        ipi_entering_ack_irq();
        trace_call_function_entry(CALL_FUNCTION_VECTOR);
        __smp_call_function_interrupt();
        trace_call_function_exit(CALL_FUNCTION_VECTOR);
        exiting_irq();
}

static inline void __smp_call_function_single_interrupt(void)
{
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
}

__visible void smp_call_function_single_interrupt(struct pt_regs *regs)
{
        ipi_entering_ack_irq();
        __smp_call_function_single_interrupt();
        exiting_irq();
}

__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
{
        ipi_entering_ack_irq();
        trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
        __smp_call_function_single_interrupt();
        trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
        exiting_irq();
}

static int __init nonmi_ipi_setup(char *str)
{
        smp_no_nmi_ipi = true;
        return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);
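
/*
 * Usage example: booting with "nonmi_ipi" on the kernel command line
 * sets smp_no_nmi_ipi, so native_stop_other_cpus() stops at the
 * REBOOT_VECTOR stage and never escalates to the NMI fallback:
 *
 *      vmlinuz ... nonmi_ipi
 */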

struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu   = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus       = native_smp_prepare_cpus,
        .smp_cpus_done          = native_smp_cpus_done,

        .stop_other_cpus        = native_stop_other_cpus,
#if defined(CONFIG_KEXEC_CORE)
        .crash_stop_other_cpus  = kdump_nmi_shootdown_cpus,
#endif
        .smp_send_reschedule    = native_smp_send_reschedule,

        .cpu_up                 = native_cpu_up,
        .cpu_die                = native_cpu_die,
        .cpu_disable            = native_cpu_disable,
        .play_dead              = native_play_dead,

        .send_call_func_ipi     = native_send_call_func_ipi,
        .send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
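
/*
 * Illustrative sketch (the hv_* names below are hypothetical): since
 * smp_ops is a plain exported structure, an alternative platform can
 * override individual operations early in boot, e.g.:
 *
 *      static void hv_send_reschedule(int cpu)
 *      {
 *              ... platform-specific IPI mechanism ...
 *      }
 *
 *      void __init hv_smp_init(void)
 *      {
 *              smp_ops.smp_send_reschedule = hv_send_reschedule;
 *      }
 */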