// SPDX-License-Identifier: GPL-2.0
/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>
#include <asm/nmi.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
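/*
 * Sample period for the hardlockup detector's perf event, in CPU cycles:
 * cpu_khz * 1000 is cycles per second, and watchdog_thresh is in seconds.
 */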
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_cpumask_backtrace
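/*
 * Deliver an NMI to every CPU in @mask; each target CPU then dumps its
 * stack from the NMI handler below.
 */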
static void nmi_raise_cpu_backtrace(cpumask_t *mask)
{
	apic->send_IPI_mask(mask, NMI_VECTOR);
}

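/*
 * The generic nmi_trigger_cpumask_backtrace() helper tracks which CPUs
 * still owe a backtrace and bounds the wait; the raise callback above
 * supplies the x86-specific NMI IPI.
 */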
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self,
				      nmi_raise_cpu_backtrace);
}

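/*
 * Runs on every CPU that receives the backtrace NMI.  nmi_cpu_backtrace()
 * dumps this CPU's state if a backtrace was requested for it; otherwise
 * the NMI is not ours and is passed on with NMI_DONE.
 */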
static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	if (nmi_cpu_backtrace(regs))
		return NMI_HANDLED;

	return NMI_DONE;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace_handler);

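/*
 * Register on the NMI_LOCAL chain at early boot so the backtrace NMIs
 * sent above are recognized as soon as they can be delivered.
 */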
static int __init register_nmi_cpu_backtrace_handler(void)
{
	register_nmi_handler(NMI_LOCAL, nmi_cpu_backtrace_handler,
				0, "arch_bt");
	return 0;
}
early_initcall(register_nmi_cpu_backtrace_handler);
#endif
/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

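/*
 * Snapshot the online CPUs into backtrace_mask, send an NMI to all of
 * them, and poll for up to 10 seconds while each CPU's NMI handler
 * clears its own bit once it has dumped its state.
 */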
void arch_trigger_all_cpu_backtrace(void)
{
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}

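/*
 * DIE_NMI notifier: if this CPU is still marked in backtrace_mask, dump
 * its registers under an arch spinlock so output from different CPUs
 * does not interleave, clear the bit, and return NOTIFY_STOP to mark
 * the NMI as handled.
 */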
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
					 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int cpu;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;
	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NOTIFY_STOP;
	}

	return NOTIFY_DONE;
}

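/*
 * Hook the NMI die chain at low priority so other NMI handlers are
 * consulted first.
 */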
static __read_mostly struct notifier_block backtrace_notifier = {
	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
	.next                   = NULL,
	.priority               = NMI_LOCAL_LOW_PRIOR,
};

static int __init register_trigger_all_cpu_backtrace(void)
{
	register_die_notifier(&backtrace_notifier);
	return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif