arch/x86/kernel/nmi.c (v3.5.6)
  1/*
  2 *  Copyright (C) 1991, 1992  Linus Torvalds
  3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
  5 *
  6 *  Pentium III FXSR, SSE support
  7 *	Gareth Hughes <gareth@valinux.com>, May 2000
  8 */
  9
 10/*
 11 * Handle hardware traps and faults.
 12 */
 13#include <linux/spinlock.h>
 14#include <linux/kprobes.h>
 15#include <linux/kdebug.h>
 16#include <linux/nmi.h>
 17#include <linux/delay.h>
 18#include <linux/hardirq.h>
 19#include <linux/slab.h>
 20#include <linux/export.h>
 21
 22#if defined(CONFIG_EDAC)
 23#include <linux/edac.h>
 24#endif
 25
 26#include <linux/atomic.h>
 27#include <asm/traps.h>
 28#include <asm/mach_traps.h>
 29#include <asm/nmi.h>
 30#include <asm/x86_init.h>
 31
 32struct nmi_desc {
 33	spinlock_t lock;
 34	struct list_head head;
 35};
 36
 37static struct nmi_desc nmi_desc[NMI_MAX] = 
 38{
 39	{
 40		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
 41		.head = LIST_HEAD_INIT(nmi_desc[0].head),
 42	},
 43	{
 44		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
 45		.head = LIST_HEAD_INIT(nmi_desc[1].head),
 46	},
 47	{
 48		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
 49		.head = LIST_HEAD_INIT(nmi_desc[2].head),
 50	},
 51	{
 52		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
 53		.head = LIST_HEAD_INIT(nmi_desc[3].head),
 54	},
 55
 56};
 57
 58struct nmi_stats {
 59	unsigned int normal;
 60	unsigned int unknown;
 61	unsigned int external;
 62	unsigned int swallow;
 63};
 64
 65static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
 66
 67static int ignore_nmis;
 68
 69int unknown_nmi_panic;
 70/*
 71 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 72 * only be used in NMI handler.
 73 */
 74static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 75
 76static int __init setup_unknown_nmi_panic(char *str)
 77{
 78	unknown_nmi_panic = 1;
 79	return 1;
 80}
 81__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 82
 83#define nmi_to_desc(type) (&nmi_desc[type])
 84
 85static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
 86{
 87	struct nmi_desc *desc = nmi_to_desc(type);
 88	struct nmiaction *a;
 89	int handled=0;
 90
 91	rcu_read_lock();
 92
 93	/*
 94	 * NMIs are edge-triggered, which means if you have enough
 95	 * of them concurrently, you can lose some because only one
 96	 * can be latched at any given time.  Walk the whole list
 97	 * to handle those situations.
 98	 */
 99	list_for_each_entry_rcu(a, &desc->head, list)
100		handled += a->handler(type, regs);
101
102	rcu_read_unlock();
103
104	/* return total number of NMI events handled */
105	return handled;
106}
107
108int __register_nmi_handler(unsigned int type, struct nmiaction *action)
109{
110	struct nmi_desc *desc = nmi_to_desc(type);
111	unsigned long flags;
112
113	if (!action->handler)
114		return -EINVAL;
115
116	spin_lock_irqsave(&desc->lock, flags);
117
118	/*
119	 * most handlers of type NMI_UNKNOWN never return because
120	 * they just assume the NMI is theirs.  Just a sanity check
121	 * to manage expectations
122	 */
123	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
124	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
125	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
126
127	/*
128	 * some handlers need to be executed first otherwise a fake
129	 * event confuses some handlers (kdump uses this flag)
130	 */
131	if (action->flags & NMI_FLAG_FIRST)
132		list_add_rcu(&action->list, &desc->head);
133	else
134		list_add_tail_rcu(&action->list, &desc->head);
135	
136	spin_unlock_irqrestore(&desc->lock, flags);
137	return 0;
138}
139EXPORT_SYMBOL(__register_nmi_handler);
140
141void unregister_nmi_handler(unsigned int type, const char *name)
142{
143	struct nmi_desc *desc = nmi_to_desc(type);
144	struct nmiaction *n;
145	unsigned long flags;
146
147	spin_lock_irqsave(&desc->lock, flags);
148
149	list_for_each_entry_rcu(n, &desc->head, list) {
150		/*
151		 * the name passed in to describe the nmi handler
152		 * is used as the lookup key
153		 */
154		if (!strcmp(n->name, name)) {
155			WARN(in_nmi(),
156				"Trying to free NMI (%s) from NMI context!\n", n->name);
157			list_del_rcu(&n->list);
158			break;
159		}
160	}
161
162	spin_unlock_irqrestore(&desc->lock, flags);
163	synchronize_rcu();
164}
165EXPORT_SYMBOL_GPL(unregister_nmi_handler);
166
167static __kprobes void
168pci_serr_error(unsigned char reason, struct pt_regs *regs)
169{
170	/* check to see if anyone registered against these types of errors */
171	if (nmi_handle(NMI_SERR, regs, false))
172		return;
173
174	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
175		 reason, smp_processor_id());
176
177	/*
178	 * On some machines, PCI SERR line is used to report memory
179	 * errors. EDAC makes use of it.
180	 */
181#if defined(CONFIG_EDAC)
182	if (edac_handler_set()) {
183		edac_atomic_assert_error();
184		return;
185	}
186#endif
187
188	if (panic_on_unrecovered_nmi)
189		panic("NMI: Not continuing");
190
191	pr_emerg("Dazed and confused, but trying to continue\n");
192
193	/* Clear and disable the PCI SERR error line. */
194	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
195	outb(reason, NMI_REASON_PORT);
196}
197
198static __kprobes void
199io_check_error(unsigned char reason, struct pt_regs *regs)
200{
201	unsigned long i;
202
203	/* check to see if anyone registered against these types of errors */
204	if (nmi_handle(NMI_IO_CHECK, regs, false))
205		return;
206
207	pr_emerg(
208	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
209		 reason, smp_processor_id());
210	show_regs(regs);
211
212	if (panic_on_io_nmi)
213		panic("NMI IOCK error: Not continuing");
214
215	/* Re-enable the IOCK line, wait for a few seconds */
216	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
217	outb(reason, NMI_REASON_PORT);
218
219	i = 20000;
220	while (--i) {
221		touch_nmi_watchdog();
222		udelay(100);
223	}
224
225	reason &= ~NMI_REASON_CLEAR_IOCHK;
226	outb(reason, NMI_REASON_PORT);
227}
228
229static __kprobes void
230unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
231{
232	int handled;
233
234	/*
235	 * Use 'false' as back-to-back NMIs are dealt with one level up.
236	 * Of course this makes having multiple 'unknown' handlers useless
237	 * as only the first one is ever run (unless it can actually determine
238	 * if it caused the NMI)
239	 */
240	handled = nmi_handle(NMI_UNKNOWN, regs, false);
241	if (handled) {
242		__this_cpu_add(nmi_stats.unknown, handled);
243		return;
244	}
245
246	__this_cpu_add(nmi_stats.unknown, 1);
247
248	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
249		 reason, smp_processor_id());
250
251	pr_emerg("Do you have a strange power saving mode enabled?\n");
252	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
253		panic("NMI: Not continuing");
254
255	pr_emerg("Dazed and confused, but trying to continue\n");
256}
257
258static DEFINE_PER_CPU(bool, swallow_nmi);
259static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
260
261static __kprobes void default_do_nmi(struct pt_regs *regs)
262{
263	unsigned char reason = 0;
264	int handled;
265	bool b2b = false;
266
267	/*
268	 * CPU-specific NMI must be processed before non-CPU-specific
269	 * NMI, otherwise we may lose it, because the CPU-specific
270	 * NMI can not be detected/processed on other CPUs.
271	 */
272
273	/*
274	 * Back-to-back NMIs are interesting because they can either
275	 * be two NMIs or more than two NMIs (anything over two is dropped
276	 * due to NMI being edge-triggered).  If this is the second half
277	 * of the back-to-back NMI, assume we dropped things and process
278	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour
279	 */
280	if (regs->ip == __this_cpu_read(last_nmi_rip))
281		b2b = true;
282	else
283		__this_cpu_write(swallow_nmi, false);
284
285	__this_cpu_write(last_nmi_rip, regs->ip);
286
287	handled = nmi_handle(NMI_LOCAL, regs, b2b);
288	__this_cpu_add(nmi_stats.normal, handled);
289	if (handled) {
290		/*
291	 * There are cases when an NMI handler handles multiple
292	 * events in the current NMI.  One of these events may
293	 * be queued for the next NMI.  Because the event is
294	 * already handled, the next NMI will result in an unknown
295	 * NMI.  Instead let's flag this for a potential NMI to
296		 * swallow.
297		 */
298		if (handled > 1)
299			__this_cpu_write(swallow_nmi, true);
300		return;
301	}
302
303	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
304	raw_spin_lock(&nmi_reason_lock);
305	reason = x86_platform.get_nmi_reason();
306
307	if (reason & NMI_REASON_MASK) {
308		if (reason & NMI_REASON_SERR)
309			pci_serr_error(reason, regs);
310		else if (reason & NMI_REASON_IOCHK)
311			io_check_error(reason, regs);
312#ifdef CONFIG_X86_32
313		/*
314		 * Reassert NMI in case it became active
315		 * meanwhile as it's edge-triggered:
316		 */
317		reassert_nmi();
318#endif
319		__this_cpu_add(nmi_stats.external, 1);
320		raw_spin_unlock(&nmi_reason_lock);
321		return;
322	}
323	raw_spin_unlock(&nmi_reason_lock);
324
325	/*
326	 * Only one NMI can be latched at a time.  To handle
327	 * this we may process multiple nmi handlers at once to
328	 * cover the case where an NMI is dropped.  The downside
329	 * to this approach is we may process an NMI prematurely,
330	 * while its real NMI is sitting latched.  This will cause
331	 * an unknown NMI on the next run of the NMI processing.
332	 *
333	 * We tried to flag that condition above, by setting the
334	 * swallow_nmi flag when we process more than one event.
335	 * This condition is also only present on the second half
336	 * of a back-to-back NMI, so we flag that condition too.
337	 *
338	 * If both are true, we assume we already processed this
339	 * NMI previously and we swallow it.  Otherwise we reset
340	 * the logic.
341	 *
342	 * There are scenarios where we may accidentally swallow
343	 * a 'real' unknown NMI.  For example, while processing
344	 * a perf NMI another perf NMI comes in along with a
345	 * 'real' unknown NMI.  These two NMIs get combined into
346	 * one (as described above).  When the next NMI gets
347	 * processed, it will be flagged by perf as handled, but
348	 * no one will know that there was a 'real' unknown NMI sent
349	 * also.  As a result it gets swallowed.  Or if the first
350	 * perf NMI returns two events handled then the second
351	 * NMI will get eaten by the logic below, again losing a
352	 * 'real' unknown NMI.  But this is the best we can do
353	 * for now.
354	 */
355	if (b2b && __this_cpu_read(swallow_nmi))
356		__this_cpu_add(nmi_stats.swallow, 1);
357	else
358		unknown_nmi_error(reason, regs);
359}
360
361/*
362 * NMIs can hit breakpoints, which will cause them to lose their
363 * NMI context with the CPU when the breakpoint does an iret.
364 */
365#ifdef CONFIG_X86_32
366/*
367 * For i386, NMIs use the same stack as the kernel, and we can
368 * add a workaround to the iret problem in C. Simply have 3 states
369 * the NMI can be in.
370 *
371 *  1) not running
372 *  2) executing
373 *  3) latched
374 *
375 * When no NMI is in progress, it is in the "not running" state.
376 * When an NMI comes in, it goes into the "executing" state.
377 * Normally, if another NMI is triggered, it does not interrupt
378 * the running NMI and the HW will simply latch it so that when
379 * the first NMI finishes, it will restart the second NMI.
380 * (Note, the latch is binary, thus multiple NMIs triggering,
381 *  when one is running, are ignored. Only one NMI is restarted.)
382 *
383 * If an NMI hits a breakpoint that executes an iret, another
384 * NMI can preempt it. We do not want to allow this new NMI
385 * to run, but we want to execute it when the first one finishes.
386 * We set the state to "latched", and the first NMI will perform
387 * a cmpxchg on the state, and if it doesn't successfully
388 * reset the state to "not running" it will restart the next
389 * NMI.
390 */
391enum nmi_states {
392	NMI_NOT_RUNNING,
393	NMI_EXECUTING,
394	NMI_LATCHED,
395};
396static DEFINE_PER_CPU(enum nmi_states, nmi_state);
397
398#define nmi_nesting_preprocess(regs)					\
399	do {								\
400		if (__get_cpu_var(nmi_state) != NMI_NOT_RUNNING) {	\
401			__get_cpu_var(nmi_state) = NMI_LATCHED;		\
402			return;						\
403		}							\
404	nmi_restart:							\
405		__get_cpu_var(nmi_state) = NMI_EXECUTING;		\
406	} while (0)
407
408#define nmi_nesting_postprocess()					\
409	do {								\
410		if (cmpxchg(&__get_cpu_var(nmi_state),			\
411		    NMI_EXECUTING, NMI_NOT_RUNNING) != NMI_EXECUTING)	\
412			goto nmi_restart;				\
413	} while (0)
414#else /* x86_64 */
415/*
416 * In x86_64 things are a bit more difficult. This has the same problem
417 * where an NMI hitting a breakpoint that calls iret will remove the
418 * NMI context, allowing a nested NMI to enter. What makes this more
419 * difficult is that both NMIs and breakpoints have their own stack.
420 * When a new NMI or breakpoint is executed, the stack is set to a fixed
421 * point. If an NMI is nested, it will have its stack set at that same
422 * fixed address that the first NMI had, and will start corrupting the
423 * stack. This is handled in entry_64.S, but the same problem exists with
424 * the breakpoint stack.
425 *
426 * If a breakpoint is being processed, and the debug stack is being used,
427 * if an NMI comes in and also hits a breakpoint, the stack pointer
428 * will be set to the same fixed address as the breakpoint that was
429 * interrupted, causing that stack to be corrupted. To handle this case,
430 * check if the stack that was interrupted is the debug stack, and if
431 * so, change the IDT so that new breakpoints will use the current stack
432 * and not switch to the fixed address. On return of the NMI, switch back
433 * to the original IDT.
434 */
435static DEFINE_PER_CPU(int, update_debug_stack);
436
437static inline void nmi_nesting_preprocess(struct pt_regs *regs)
438{
439	/*
440	 * If we interrupted a breakpoint, it is possible that
441	 * the nmi handler will have breakpoints too. We need to
442	 * change the IDT such that breakpoints that happen here
443	 * continue to use the NMI stack.
444	 */
445	if (unlikely(is_debug_stack(regs->sp))) {
446		debug_stack_set_zero();
447		this_cpu_write(update_debug_stack, 1);
448	}
449}
450
451static inline void nmi_nesting_postprocess(void)
452{
453	if (unlikely(this_cpu_read(update_debug_stack))) {
454		debug_stack_reset();
455		this_cpu_write(update_debug_stack, 0);
456	}
457}
458#endif
459
460dotraplinkage notrace __kprobes void
461do_nmi(struct pt_regs *regs, long error_code)
462{
463	nmi_nesting_preprocess(regs);
464
465	nmi_enter();
466
467	inc_irq_stat(__nmi_count);
468
469	if (!ignore_nmis)
470		default_do_nmi(regs);
471
472	nmi_exit();
473
474	/* On i386, may loop back to preprocess */
475	nmi_nesting_postprocess();
476}
477
478void stop_nmi(void)
479{
480	ignore_nmis++;
481}
482
483void restart_nmi(void)
484{
485	ignore_nmis--;
486}
487
488/* reset the back-to-back NMI logic */
489void local_touch_nmi(void)
490{
491	__this_cpu_write(last_nmi_rip, 0);
492}
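Both versions of the file expose the same registration interface to the rest of the kernel. What follows is an illustrative sketch only, not part of nmi.c: a hypothetical module hooks the CPU-local NMI chain through the register_nmi_handler() wrapper from <asm/nmi.h>, which builds the struct nmiaction handed to __register_nmi_handler() above. The my_* names and the device-status stub are assumptions made for this example; a real handler must only touch NMI-safe state.

#include <linux/module.h>
#include <asm/nmi.h>

/* Hypothetical stand-in for reading a real device's NMI status register. */
static bool my_device_raised_nmi(void)
{
	return false;
}

/*
 * Called from nmi_handle() on the NMI_LOCAL chain.  Return NMI_HANDLED (1)
 * to claim the event, or NMI_DONE (0) to let other handlers (and eventually
 * unknown_nmi_error()) deal with it.
 */
static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!my_device_raised_nmi())
		return NMI_DONE;

	/* Acknowledge the hypothetical device here. */
	return NMI_HANDLED;
}

static int __init my_nmi_example_init(void)
{
	/* The string "my_nmi" is the lookup key used by unregister_nmi_handler(). */
	return register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
}

static void __exit my_nmi_example_exit(void)
{
	/* Must not be called from NMI context (see the WARN in the file above). */
	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
}

module_init(my_nmi_example_init);
module_exit(my_nmi_example_exit);
MODULE_LICENSE("GPL");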
arch/x86/kernel/nmi.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  Copyright (C) 1991, 1992  Linus Torvalds
  4 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  5 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
  6 *
  7 *  Pentium III FXSR, SSE support
  8 *	Gareth Hughes <gareth@valinux.com>, May 2000
  9 */
 10
 11/*
 12 * Handle hardware traps and faults.
 13 */
 14#include <linux/spinlock.h>
 15#include <linux/kprobes.h>
 16#include <linux/kdebug.h>
 17#include <linux/sched/debug.h>
 18#include <linux/nmi.h>
 19#include <linux/debugfs.h>
 20#include <linux/delay.h>
 21#include <linux/hardirq.h>
 22#include <linux/ratelimit.h>
 23#include <linux/slab.h>
 24#include <linux/export.h>
 25#include <linux/atomic.h>
 26#include <linux/sched/clock.h>
 27
 28#include <asm/cpu_entry_area.h>
 29#include <asm/traps.h>
 30#include <asm/mach_traps.h>
 31#include <asm/nmi.h>
 32#include <asm/x86_init.h>
 33#include <asm/reboot.h>
 34#include <asm/cache.h>
 35#include <asm/nospec-branch.h>
 36#include <asm/microcode.h>
 37#include <asm/sev.h>
 38#include <asm/fred.h>
 39
 40#define CREATE_TRACE_POINTS
 41#include <trace/events/nmi.h>
 42
 43struct nmi_desc {
 44	raw_spinlock_t lock;
 45	struct list_head head;
 46};
 47
 48static struct nmi_desc nmi_desc[NMI_MAX] = 
 49{
 50	{
 51		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
 52		.head = LIST_HEAD_INIT(nmi_desc[0].head),
 53	},
 54	{
 55		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
 56		.head = LIST_HEAD_INIT(nmi_desc[1].head),
 57	},
 58	{
 59		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
 60		.head = LIST_HEAD_INIT(nmi_desc[2].head),
 61	},
 62	{
 63		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
 64		.head = LIST_HEAD_INIT(nmi_desc[3].head),
 65	},
 66
 67};
 68
 69struct nmi_stats {
 70	unsigned int normal;
 71	unsigned int unknown;
 72	unsigned int external;
 73	unsigned int swallow;
 74	unsigned long recv_jiffies;
 75	unsigned long idt_seq;
 76	unsigned long idt_nmi_seq;
 77	unsigned long idt_ignored;
 78	atomic_long_t idt_calls;
 79	unsigned long idt_seq_snap;
 80	unsigned long idt_nmi_seq_snap;
 81	unsigned long idt_ignored_snap;
 82	long idt_calls_snap;
 83};
 84
 85static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
 86
 87static int ignore_nmis __read_mostly;
 88
 89int unknown_nmi_panic;
 90/*
 91 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 92 * only be used in NMI handler.
 93 */
 94static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 95
 96static int __init setup_unknown_nmi_panic(char *str)
 97{
 98	unknown_nmi_panic = 1;
 99	return 1;
100}
101__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
102
103#define nmi_to_desc(type) (&nmi_desc[type])
104
105static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
106
107static int __init nmi_warning_debugfs(void)
108{
109	debugfs_create_u64("nmi_longest_ns", 0644,
110			arch_debugfs_dir, &nmi_longest_ns);
111	return 0;
112}
113fs_initcall(nmi_warning_debugfs);
114
115static void nmi_check_duration(struct nmiaction *action, u64 duration)
116{
117	int remainder_ns, decimal_msecs;
118
119	if (duration < nmi_longest_ns || duration < action->max_duration)
120		return;
121
122	action->max_duration = duration;
123
124	remainder_ns = do_div(duration, (1000 * 1000));
125	decimal_msecs = remainder_ns / 1000;
126
127	printk_ratelimited(KERN_INFO
128		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
129		action->handler, duration, decimal_msecs);
130}
131
132static int nmi_handle(unsigned int type, struct pt_regs *regs)
133{
134	struct nmi_desc *desc = nmi_to_desc(type);
135	struct nmiaction *a;
136	int handled=0;
137
138	rcu_read_lock();
139
140	/*
141	 * NMIs are edge-triggered, which means if you have enough
142	 * of them concurrently, you can lose some because only one
143	 * can be latched at any given time.  Walk the whole list
144	 * to handle those situations.
145	 */
146	list_for_each_entry_rcu(a, &desc->head, list) {
147		int thishandled;
148		u64 delta;
149
150		delta = sched_clock();
151		thishandled = a->handler(type, regs);
152		handled += thishandled;
153		delta = sched_clock() - delta;
154		trace_nmi_handler(a->handler, (int)delta, thishandled);
155
156		nmi_check_duration(a, delta);
157	}
158
159	rcu_read_unlock();
160
161	/* return total number of NMI events handled */
162	return handled;
163}
164NOKPROBE_SYMBOL(nmi_handle);
165
166int __register_nmi_handler(unsigned int type, struct nmiaction *action)
167{
168	struct nmi_desc *desc = nmi_to_desc(type);
169	unsigned long flags;
170
171	if (WARN_ON_ONCE(!action->handler || !list_empty(&action->list)))
172		return -EINVAL;
173
174	raw_spin_lock_irqsave(&desc->lock, flags);
175
176	/*
177	 * Indicate if there are multiple registrations on the
178	 * internal NMI handler call chains (SERR and IO_CHECK).
179	 */
180	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
181	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
182
183	/*
184	 * some handlers need to be executed first otherwise a fake
185	 * event confuses some handlers (kdump uses this flag)
186	 */
187	if (action->flags & NMI_FLAG_FIRST)
188		list_add_rcu(&action->list, &desc->head);
189	else
190		list_add_tail_rcu(&action->list, &desc->head);
191
192	raw_spin_unlock_irqrestore(&desc->lock, flags);
193	return 0;
194}
195EXPORT_SYMBOL(__register_nmi_handler);
196
197void unregister_nmi_handler(unsigned int type, const char *name)
198{
199	struct nmi_desc *desc = nmi_to_desc(type);
200	struct nmiaction *n, *found = NULL;
201	unsigned long flags;
202
203	raw_spin_lock_irqsave(&desc->lock, flags);
204
205	list_for_each_entry_rcu(n, &desc->head, list) {
206		/*
207		 * the name passed in to describe the nmi handler
208		 * is used as the lookup key
209		 */
210		if (!strcmp(n->name, name)) {
211			WARN(in_nmi(),
212				"Trying to free NMI (%s) from NMI context!\n", n->name);
213			list_del_rcu(&n->list);
214			found = n;
215			break;
216		}
217	}
218
219	raw_spin_unlock_irqrestore(&desc->lock, flags);
220	if (found) {
221		synchronize_rcu();
222		INIT_LIST_HEAD(&found->list);
223	}
224}
225EXPORT_SYMBOL_GPL(unregister_nmi_handler);
226
227static void
228pci_serr_error(unsigned char reason, struct pt_regs *regs)
229{
230	/* check to see if anyone registered against these types of errors */
231	if (nmi_handle(NMI_SERR, regs))
232		return;
233
234	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
235		 reason, smp_processor_id());
236
237	if (panic_on_unrecovered_nmi)
238		nmi_panic(regs, "NMI: Not continuing");
239
240	pr_emerg("Dazed and confused, but trying to continue\n");
241
242	/* Clear and disable the PCI SERR error line. */
243	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
244	outb(reason, NMI_REASON_PORT);
245}
246NOKPROBE_SYMBOL(pci_serr_error);
247
248static void
249io_check_error(unsigned char reason, struct pt_regs *regs)
250{
251	unsigned long i;
252
253	/* check to see if anyone registered against these types of errors */
254	if (nmi_handle(NMI_IO_CHECK, regs))
255		return;
256
257	pr_emerg(
258	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
259		 reason, smp_processor_id());
260	show_regs(regs);
261
262	if (panic_on_io_nmi) {
263		nmi_panic(regs, "NMI IOCK error: Not continuing");
264
265		/*
266		 * If we end up here, it means we have received an NMI while
267		 * processing panic(). Simply return without delaying and
268		 * re-enabling NMIs.
269		 */
270		return;
271	}
272
273	/* Re-enable the IOCK line, wait for a few seconds */
274	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
275	outb(reason, NMI_REASON_PORT);
276
277	i = 20000;
278	while (--i) {
279		touch_nmi_watchdog();
280		udelay(100);
281	}
282
283	reason &= ~NMI_REASON_CLEAR_IOCHK;
284	outb(reason, NMI_REASON_PORT);
285}
286NOKPROBE_SYMBOL(io_check_error);
287
288static void
289unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
290{
291	int handled;
292
293	/*
294	 * Use 'false' as back-to-back NMIs are dealt with one level up.
295	 * Of course this makes having multiple 'unknown' handlers useless
296	 * as only the first one is ever run (unless it can actually determine
297	 * if it caused the NMI)
298	 */
299	handled = nmi_handle(NMI_UNKNOWN, regs);
300	if (handled) {
301		__this_cpu_add(nmi_stats.unknown, handled);
302		return;
303	}
304
305	__this_cpu_add(nmi_stats.unknown, 1);
306
307	pr_emerg_ratelimited("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
308			     reason, smp_processor_id());
309
310	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
311		nmi_panic(regs, "NMI: Not continuing");
312
313	pr_emerg_ratelimited("Dazed and confused, but trying to continue\n");
314}
315NOKPROBE_SYMBOL(unknown_nmi_error);
316
317static DEFINE_PER_CPU(bool, swallow_nmi);
318static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
319
320static noinstr void default_do_nmi(struct pt_regs *regs)
321{
322	unsigned char reason = 0;
323	int handled;
324	bool b2b = false;
325
326	/*
327	 * CPU-specific NMI must be processed before non-CPU-specific
328	 * NMI, otherwise we may lose it, because the CPU-specific
329	 * NMI can not be detected/processed on other CPUs.
330	 */
331
332	/*
333	 * Back-to-back NMIs are interesting because they can either
334	 * be two NMIs or more than two NMIs (anything over two is dropped
335	 * due to NMI being edge-triggered).  If this is the second half
336	 * of the back-to-back NMI, assume we dropped things and process
337	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour
338	 */
339	if (regs->ip == __this_cpu_read(last_nmi_rip))
340		b2b = true;
341	else
342		__this_cpu_write(swallow_nmi, false);
343
344	__this_cpu_write(last_nmi_rip, regs->ip);
345
346	instrumentation_begin();
347
348	if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
349		goto out;
350
351	handled = nmi_handle(NMI_LOCAL, regs);
352	__this_cpu_add(nmi_stats.normal, handled);
353	if (handled) {
354		/*
355	 * There are cases when an NMI handler handles multiple
356	 * events in the current NMI.  One of these events may
357	 * be queued for the next NMI.  Because the event is
358	 * already handled, the next NMI will result in an unknown
359	 * NMI.  Instead let's flag this for a potential NMI to
360		 * swallow.
361		 */
362		if (handled > 1)
363			__this_cpu_write(swallow_nmi, true);
364		goto out;
365	}
366
367	/*
368	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
369	 *
370	 * Another CPU may be processing panic routines while holding
371	 * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
372	 * and if so, call its callback directly.  If there is no CPU preparing
373	 * crash dump, we simply loop here.
374	 */
375	while (!raw_spin_trylock(&nmi_reason_lock)) {
376		run_crash_ipi_callback(regs);
377		cpu_relax();
378	}
379
380	reason = x86_platform.get_nmi_reason();
381
382	if (reason & NMI_REASON_MASK) {
383		if (reason & NMI_REASON_SERR)
384			pci_serr_error(reason, regs);
385		else if (reason & NMI_REASON_IOCHK)
386			io_check_error(reason, regs);
387#ifdef CONFIG_X86_32
388		/*
389		 * Reassert NMI in case it became active
390		 * meanwhile as it's edge-triggered:
391		 */
392		reassert_nmi();
393#endif
394		__this_cpu_add(nmi_stats.external, 1);
395		raw_spin_unlock(&nmi_reason_lock);
396		goto out;
397	}
398	raw_spin_unlock(&nmi_reason_lock);
399
400	/*
401	 * Only one NMI can be latched at a time.  To handle
402	 * this we may process multiple nmi handlers at once to
403	 * cover the case where an NMI is dropped.  The downside
404	 * to this approach is we may process an NMI prematurely,
405	 * while its real NMI is sitting latched.  This will cause
406	 * an unknown NMI on the next run of the NMI processing.
407	 *
408	 * We tried to flag that condition above, by setting the
409	 * swallow_nmi flag when we process more than one event.
410	 * This condition is also only present on the second half
411	 * of a back-to-back NMI, so we flag that condition too.
412	 *
413	 * If both are true, we assume we already processed this
414	 * NMI previously and we swallow it.  Otherwise we reset
415	 * the logic.
416	 *
417	 * There are scenarios where we may accidentally swallow
418	 * a 'real' unknown NMI.  For example, while processing
419	 * a perf NMI another perf NMI comes in along with a
420	 * 'real' unknown NMI.  These two NMIs get combined into
421	 * one (as described above).  When the next NMI gets
422	 * processed, it will be flagged by perf as handled, but
423	 * no one will know that there was a 'real' unknown NMI sent
424	 * also.  As a result it gets swallowed.  Or if the first
425	 * perf NMI returns two events handled then the second
426	 * NMI will get eaten by the logic below, again losing a
427	 * 'real' unknown NMI.  But this is the best we can do
428	 * for now.
429	 */
430	if (b2b && __this_cpu_read(swallow_nmi))
431		__this_cpu_add(nmi_stats.swallow, 1);
432	else
433		unknown_nmi_error(reason, regs);
434
435out:
436	instrumentation_end();
437}
438
439/*
440 * NMIs can page fault or hit breakpoints, which will cause them to lose
441 * their NMI context with the CPU when the breakpoint or page fault does an IRET.
442 *
443 * As a result, NMIs can nest if NMIs get unmasked due an IRET during
444 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
445 * if the outer NMI came from kernel mode, but we can still nest if the
446 * outer NMI came from user mode.
447 *
448 * To handle these nested NMIs, we have three states:
449 *
450 *  1) not running
451 *  2) executing
452 *  3) latched
453 *
454 * When no NMI is in progress, it is in the "not running" state.
455 * When an NMI comes in, it goes into the "executing" state.
456 * Normally, if another NMI is triggered, it does not interrupt
457 * the running NMI and the HW will simply latch it so that when
458 * the first NMI finishes, it will restart the second NMI.
459 * (Note, the latch is binary, thus multiple NMIs triggering,
460 *  when one is running, are ignored. Only one NMI is restarted.)
461 *
462 * If an NMI executes an iret, another NMI can preempt it. We do not
463 * want to allow this new NMI to run, but we want to execute it when the
464 * first one finishes.  We set the state to "latched", and the exit of
465 * the first NMI will perform a dec_return, if the result is zero
466 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
467 * dec_return would have set the state to NMI_EXECUTING (what we want it
468 * to be when we are running). In this case, we simply jump back to
469 * rerun the NMI handler again, and restart the 'latched' NMI.
470 *
471 * No trap (breakpoint or page fault) should be hit before nmi_restart,
472 * thus there is no race between the first check of state for NOT_RUNNING
473 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
474 * at this point.
475 *
476 * In case the NMI takes a page fault, we need to save off the CR2
477 * because the NMI could have preempted another page fault and corrupt
478 * the CR2 that is about to be read. As nested NMIs must be restarted
479 * and they can not take breakpoints or page faults, the update of the
480 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
481 * Otherwise, there would be a race of another nested NMI coming in
482 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
483 */
484enum nmi_states {
485	NMI_NOT_RUNNING = 0,
486	NMI_EXECUTING,
487	NMI_LATCHED,
488};
489static DEFINE_PER_CPU(enum nmi_states, nmi_state);
490static DEFINE_PER_CPU(unsigned long, nmi_cr2);
491static DEFINE_PER_CPU(unsigned long, nmi_dr7);
492
493DEFINE_IDTENTRY_RAW(exc_nmi)
494{
495	irqentry_state_t irq_state;
496	struct nmi_stats *nsp = this_cpu_ptr(&nmi_stats);
497
498	/*
499	 * Re-enable NMIs right here when running as an SEV-ES guest. This might
500	 * cause nested NMIs, but those can be handled safely.
501	 */
502	sev_es_nmi_complete();
503	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
504		raw_atomic_long_inc(&nsp->idt_calls);
505
506	if (arch_cpu_is_offline(smp_processor_id())) {
507		if (microcode_nmi_handler_enabled())
508			microcode_offline_nmi_handler();
509		return;
510	}
511
512	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
513		this_cpu_write(nmi_state, NMI_LATCHED);
514		return;
515	}
516	this_cpu_write(nmi_state, NMI_EXECUTING);
517	this_cpu_write(nmi_cr2, read_cr2());
518
519nmi_restart:
520	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
521		WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
522		WARN_ON_ONCE(!(nsp->idt_seq & 0x1));
523		WRITE_ONCE(nsp->recv_jiffies, jiffies);
524	}
525
526	/*
527	 * Needs to happen before DR7 is accessed, because the hypervisor can
528	 * intercept DR7 reads/writes, turning those into #VC exceptions.
529	 */
530	sev_es_ist_enter(regs);
531
532	this_cpu_write(nmi_dr7, local_db_save());
533
534	irq_state = irqentry_nmi_enter(regs);
535
536	inc_irq_stat(__nmi_count);
537
538	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU) && ignore_nmis) {
539		WRITE_ONCE(nsp->idt_ignored, nsp->idt_ignored + 1);
540	} else if (!ignore_nmis) {
541		if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
542			WRITE_ONCE(nsp->idt_nmi_seq, nsp->idt_nmi_seq + 1);
543			WARN_ON_ONCE(!(nsp->idt_nmi_seq & 0x1));
544		}
545		default_do_nmi(regs);
546		if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
547			WRITE_ONCE(nsp->idt_nmi_seq, nsp->idt_nmi_seq + 1);
548			WARN_ON_ONCE(nsp->idt_nmi_seq & 0x1);
549		}
550	}
551
552	irqentry_nmi_exit(regs, irq_state);
553
554	local_db_restore(this_cpu_read(nmi_dr7));
555
556	sev_es_ist_exit();
557
558	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
559		write_cr2(this_cpu_read(nmi_cr2));
560	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
561		WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
562		WARN_ON_ONCE(nsp->idt_seq & 0x1);
563		WRITE_ONCE(nsp->recv_jiffies, jiffies);
564	}
565	if (this_cpu_dec_return(nmi_state))
566		goto nmi_restart;
567}
568
569#if IS_ENABLED(CONFIG_KVM_INTEL)
570DEFINE_IDTENTRY_RAW(exc_nmi_kvm_vmx)
571{
572	exc_nmi(regs);
573}
574#if IS_MODULE(CONFIG_KVM_INTEL)
575EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);
576#endif
577#endif
578
579#ifdef CONFIG_NMI_CHECK_CPU
580
581static char *nmi_check_stall_msg[] = {
582/*									*/
583/* +--------- nmi_seq & 0x1: CPU is currently in NMI handler.		*/
584/* | +------ cpu_is_offline(cpu)					*/
585/* | | +--- nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls):	*/
586/* | | |	NMI handler has been invoked.				*/
587/* | | |								*/
588/* V V V								*/
589/* 0 0 0 */ "NMIs are not reaching exc_nmi() handler",
590/* 0 0 1 */ "exc_nmi() handler is ignoring NMIs",
591/* 0 1 0 */ "CPU is offline and NMIs are not reaching exc_nmi() handler",
592/* 0 1 1 */ "CPU is offline and exc_nmi() handler is legitimately ignoring NMIs",
593/* 1 0 0 */ "CPU is in exc_nmi() handler and no further NMIs are reaching handler",
594/* 1 0 1 */ "CPU is in exc_nmi() handler which is legitimately ignoring NMIs",
595/* 1 1 0 */ "CPU is offline in exc_nmi() handler and no more NMIs are reaching exc_nmi() handler",
596/* 1 1 1 */ "CPU is offline in exc_nmi() handler which is legitimately ignoring NMIs",
597};
598
599void nmi_backtrace_stall_snap(const struct cpumask *btp)
600{
601	int cpu;
602	struct nmi_stats *nsp;
603
604	for_each_cpu(cpu, btp) {
605		nsp = per_cpu_ptr(&nmi_stats, cpu);
606		nsp->idt_seq_snap = READ_ONCE(nsp->idt_seq);
607		nsp->idt_nmi_seq_snap = READ_ONCE(nsp->idt_nmi_seq);
608		nsp->idt_ignored_snap = READ_ONCE(nsp->idt_ignored);
609		nsp->idt_calls_snap = atomic_long_read(&nsp->idt_calls);
610	}
611}
612
613void nmi_backtrace_stall_check(const struct cpumask *btp)
614{
615	int cpu;
616	int idx;
617	unsigned long nmi_seq;
618	unsigned long j = jiffies;
619	char *modp;
620	char *msgp;
621	char *msghp;
622	struct nmi_stats *nsp;
623
624	for_each_cpu(cpu, btp) {
625		nsp = per_cpu_ptr(&nmi_stats, cpu);
626		modp = "";
627		msghp = "";
628		nmi_seq = READ_ONCE(nsp->idt_nmi_seq);
629		if (nsp->idt_nmi_seq_snap + 1 == nmi_seq && (nmi_seq & 0x1)) {
630			msgp = "CPU entered NMI handler function, but has not exited";
631		} else if (nsp->idt_nmi_seq_snap == nmi_seq ||
632			   nsp->idt_nmi_seq_snap + 1 == nmi_seq) {
633			idx = ((nmi_seq & 0x1) << 2) |
634			      (cpu_is_offline(cpu) << 1) |
635			      (nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls));
636			msgp = nmi_check_stall_msg[idx];
637			if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
638				modp = ", but OK because ignore_nmis was set";
639			if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
640				msghp = " (CPU exited one NMI handler function)";
641			else if (nmi_seq & 0x1)
642				msghp = " (CPU currently in NMI handler function)";
643			else
644				msghp = " (CPU was never in an NMI handler function)";
645		} else {
646			msgp = "CPU is handling NMIs";
647		}
648		pr_alert("%s: CPU %d: %s%s%s\n", __func__, cpu, msgp, modp, msghp);
649		pr_alert("%s: last activity: %lu jiffies ago.\n",
650			 __func__, j - READ_ONCE(nsp->recv_jiffies));
651	}
652}
653
654#endif
655
656#ifdef CONFIG_X86_FRED
657/*
658 * With FRED, CR2/DR6 is pushed to #PF/#DB stack frame during FRED
659 * event delivery, i.e., there is no problem of transient states.
660 * And NMI unblocking only happens when the stack frame indicates
661 * that so should happen.
662 * that it should happen.
663 * Thus, the NMI entry stub for FRED is really straightforward and
664 * as simple as most exception handlers. As such, #DB is allowed
665 * during NMI handling.
666 */
667DEFINE_FREDENTRY_NMI(exc_nmi)
668{
669	irqentry_state_t irq_state;
670
671	if (arch_cpu_is_offline(smp_processor_id())) {
672		if (microcode_nmi_handler_enabled())
673			microcode_offline_nmi_handler();
674		return;
675	}
676
677	/*
678	 * Save CR2 for eventual restore to cover the case where the NMI
679	 * hits the VMENTER/VMEXIT region where guest CR2 is live. This
680	 * prevents guest state corruption in case that the NMI handler
681	 * takes a page fault.
682	 */
683	this_cpu_write(nmi_cr2, read_cr2());
684
685	irq_state = irqentry_nmi_enter(regs);
686
687	inc_irq_stat(__nmi_count);
688	default_do_nmi(regs);
689
690	irqentry_nmi_exit(regs, irq_state);
691
692	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
693		write_cr2(this_cpu_read(nmi_cr2));
694}
695#endif
696
697void stop_nmi(void)
698{
699	ignore_nmis++;
700}
701
702void restart_nmi(void)
703{
704	ignore_nmis--;
705}
706
707/* reset the back-to-back NMI logic */
708void local_touch_nmi(void)
709{
710	__this_cpu_write(last_nmi_rip, 0);
711}
712EXPORT_SYMBOL_GPL(local_touch_nmi);
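The swallow logic in default_do_nmi() depends on handlers reporting how many events they actually serviced. As a hedged sketch under the same assumptions as the earlier example (hypothetical my_pmu_* names and fictional hardware, registered with register_nmi_handler() exactly as shown there), a handler for an edge-triggered source should drain everything pending and return the count: a return value greater than one is what arms swallow_nmi for the possibly latched back-to-back NMI.

#include <asm/nmi.h>

/* Fictional device state standing in for a real PMU's pending-event count. */
static unsigned int my_pmu_pending;

static void my_pmu_ack_one_event(void)
{
	my_pmu_pending--;	/* a real driver would poke hardware here */
}

static int my_pmu_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	int handled = 0;

	/*
	 * Drain every pending event: the NMI line is edge-triggered, so an
	 * event raised while this NMI is already executing may never be
	 * delivered as a separate NMI of its own.
	 */
	while (my_pmu_pending) {
		my_pmu_ack_one_event();
		handled++;
	}

	/*
	 * nmi_handle() sums these return values.  Reporting two or more
	 * events makes default_do_nmi() set swallow_nmi, so a latched
	 * follow-up NMI that finds nothing left to do is accounted in
	 * nmi_stats.swallow instead of being reported as an unknown NMI.
	 */
	return handled;
}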