/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/sched/clock.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	raw_spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},

};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
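
/*
 * Example: with debugfs mounted in the usual place, the threshold created
 * above is typically exposed (in nanoseconds) as
 * /sys/kernel/debug/x86/nmi_longest_ns, and can be raised at run time to
 * quiet the "took too long" warning, e.g.:
 *
 *	echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 */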

static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = READ_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}

static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled=0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time. Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Indicate if there are multiple registrations on the
	 * internal NMI handler call chains (SERR and IO_CHECK).
	 */
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * some handlers need to be executed first otherwise a fake
	 * event confuses some handlers (kdump uses this flag)
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
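
/*
 * Example (illustrative sketch; the my_* names are made up): callers normally
 * hook into a chain through the register_nmi_handler() wrapper in
 * <asm/nmi.h>, which builds the struct nmiaction and calls
 * __register_nmi_handler() above.  A handler should return NMI_HANDLED only
 * for NMIs it actually claims and NMI_DONE otherwise, so unclaimed NMIs can
 * still reach the unknown-NMI path:
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())
 *			return NMI_DONE;
 *		my_device_ack_nmi();
 *		return NMI_HANDLED;
 *	}
 *
 *	err = register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 */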

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");

		/*
		 * If we end up here, it means we have received an NMI while
		 * processing panic(). Simply return without delaying and
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI)
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMI being edge-triggered). If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers. Otherwise, reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI. One of these events may
		 * be queued for the next NMI. Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI. Instead, let's flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/*
	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
	 *
	 * Another CPU may be processing panic routines while holding
	 * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
	 * and if so, call its callback directly. If there is no CPU preparing
	 * crash dump, we simply loop here.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time. To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped. The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched. This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it. Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI. For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI. These two NMIs get combined into
	 * one (as described above). When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * as well. As a result it gets swallowed. Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI. But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);

/*
 * NMIs can page fault or hit breakpoints, which will cause them to lose
 * their NMI context with the CPU when the breakpoint or page fault does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing. On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an IRET, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes. We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return; if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running). In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupted
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#ifdef CONFIG_X86_64
/*
 * On x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed and the debug stack is in use, and
 * an NMI comes in and also hits a breakpoint, the stack pointer will be
 * set to the same fixed address as the breakpoint that was interrupted,
 * causing that stack to be corrupted. To handle this case, check if the
 * stack that was interrupted is the debug stack, and if so, change the
 * IDT so that new breakpoints will use the current stack and not switch
 * to the fixed address. On return of the NMI, switch back to the
 * original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);
#endif

dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too. We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
#endif

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

#ifdef CONFIG_X86_64
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
#endif

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);