/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

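/*
 * One descriptor (lock plus handler list) per NMI type; the array is
 * indexed by the NMI type constants used throughout this file
 * (NMI_LOCAL, NMI_UNKNOWN, NMI_SERR, NMI_IO_CHECK; see <asm/nmi.h>).
 */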
static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},

};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * this lock can only be used in the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
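
/*
 * Note: nmi_longest_ns is the threshold (in ns, 1 ms by default) above
 * which a slow NMI handler gets reported by nmi_max_handler() below.
 * With debugfs mounted at its usual location this is runtime-tunable at
 * /sys/kernel/debug/x86/nmi_longest_ns (the exact path depends on where
 * debugfs is mounted).
 */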

static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = ACCESS_ONCE(a->max_duration);

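	/*
	 * Split the worst-case duration into whole milliseconds plus the
	 * remaining microseconds, so it prints as "N.NNN msecs" below.
	 */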
	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}

static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time. Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs. Just a sanity check
	 * to manage expectations
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * some handlers need to be executed first otherwise a fake
	 * event confuses some handlers (kdump uses this flag)
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);
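
/*
 * Illustrative sketch (not part of this file): callers normally go through
 * the register_nmi_handler() wrapper from <asm/nmi.h>, which builds the
 * struct nmiaction and calls __register_nmi_handler(). The handler and
 * name below are made-up examples; a handler returns the number of events
 * it handled, with 0 meaning "not ours":
 *
 *	static int my_nmi(unsigned int type, struct pt_regs *regs)
 *	{
 *		// claim and handle one event
 *		return 1;
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, my_nmi, 0, "my_nmi");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 */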

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");

		/*
		 * If we end up here, it means we have received an NMI while
		 * processing panic(). Simply return without delaying and
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

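	/*
	 * Busy-wait roughly two seconds (20000 iterations x 100us), poking
	 * the NMI watchdog so the wait itself does not trigger it.
	 */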
	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMIs must be processed before non-CPU-specific
	 * NMIs, otherwise we may lose one, because a CPU-specific
	 * NMI cannot be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two (anything over two is dropped
	 * due to NMIs being edge-triggered). If this is the second half
	 * of a back-to-back pair, assume we dropped things and process
	 * more handlers. Otherwise, reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI. One of these events may
		 * also have been queued to fire as the next NMI. Because
		 * that event is already handled, the next NMI would show
		 * up as an unknown NMI. Instead, let's flag this as a
		 * potential NMI to swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/*
	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
	 *
	 * Another CPU may be processing panic routines while holding
	 * nmi_reason_lock. Check if that CPU issued the IPI for crash
	 * dumping, and if so, call its callback directly. If there is no
	 * CPU preparing a crash dump, we simply spin here.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time. To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped. The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched. This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it. Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI. For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI. These two NMIs get combined into
	 * one (as described above). When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also. As a result it gets swallowed. Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI. But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);

/*
 * NMIs can page fault or hit breakpoints which will cause them to lose
 * their NMI context with the CPU when the breakpoint or page fault does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing. On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 * 1) not running
 * 2) executing
 * 3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 * when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes. We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return; if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running). In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupted
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they cannot take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#ifdef CONFIG_X86_64
/*
 * On x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed with the debug stack in use, and an
 * NMI comes in that also hits a breakpoint, the stack pointer will be set
 * to the same fixed address as the breakpoint that was interrupted,
 * causing that stack to be corrupted. To handle this case, check whether
 * the stack that was interrupted is the debug stack, and if so, change
 * the IDT so that new breakpoints will use the current stack and not
 * switch to the fixed address. On return of the NMI, switch back to the
 * original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);
#endif

dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too. We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
#endif

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

#ifdef CONFIG_X86_64
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
#endif

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);