/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
bool is_hardlockup(void);

extern int watchdog_user_enabled;
extern int nmi_watchdog_user_enabled;
extern int soft_watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;

extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

#define lockup_detector_online_cpu NULL
#define lockup_detector_offline_cpu NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
 * 'soft_watchdog_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT 0
#define SOFT_WATCHDOG_ENABLED_BIT 1
#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
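
/*
 * Illustrative sketch (not part of the upstream header): how the bits above
 * are typically tested against 'watchdog_enabled'. The helper names are
 * hypothetical and exist only to show the encoding of the run state.
 */
#if 0	/* example only, never compiled */
static inline bool example_nmi_watchdog_enabled(void)
{
	return watchdog_enabled & NMI_WATCHDOG_ENABLED;
}

static inline bool example_soft_watchdog_enabled(void)
{
	return watchdog_enabled & SOFT_WATCHDOG_ENABLED;
}
#endif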

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
# define NMI_WATCHDOG_SYSCTL_PERM	0644
#else
# define NMI_WATCHDOG_SYSCTL_PERM	0444
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_disable(void);
extern void hardlockup_detector_perf_enable(void);
extern void hardlockup_detector_perf_cleanup(void);
extern int hardlockup_detector_perf_init(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
static inline void hardlockup_detector_perf_enable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
# else
static inline int hardlockup_detector_perf_init(void) { return 0; }
# endif
#endif

void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
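
/*
 * Illustrative sketch (not part of the upstream header): a caller that
 * busy-waits with interrupts disabled and pets both watchdogs from the
 * loop. The function and its 'done' flag are hypothetical; only the
 * touch_nmi_watchdog() call site is the point of the example.
 */
#if 0	/* example only, never compiled */
static void example_wait_with_irqs_off(volatile bool *done)
{
	/* The caller is assumed to have disabled interrupts already. */
	while (!*done) {
		cpu_relax();
		/* Resets both the hard and soft lockup timeouts. */
		touch_nmi_watchdog();
	}
}
#endif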

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
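
/*
 * Illustrative sketch (not part of the upstream header): callers check the
 * return value and fall back to a local dump when the architecture has no
 * remote-backtrace hook. The call site is hypothetical; dump_stack() is the
 * usual fallback.
 */
#if 0	/* example only, never compiled */
static void example_dump_all_cpus(void)
{
	if (!trigger_all_cpu_backtrace())
		dump_stack();	/* fall back: current CPU only */
}
#endif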

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
	defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

struct ctl_table;
int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
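
/*
 * Illustrative sketch (not part of the upstream header): the handlers above
 * use the proc_handler signature, so they are wired up through a ctl_table
 * entry (kernel/watchdog.c does this for /proc/sys/kernel/watchdog). The
 * table below is a simplified, hypothetical example, not the exact upstream
 * definition.
 */
#if 0	/* example only, never compiled */
static struct ctl_table example_watchdog_sysctl[] = {
	{
		.procname	= "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog,
	},
	{ }
};
#endif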

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/* Arch specific watchdogs might need to share extra watchdog-related APIs. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);

extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;

extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

#define lockup_detector_online_cpu NULL
#define lockup_detector_offline_cpu NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
 * 'watchdog_softlockup_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
 */
#define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0
#define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1
#define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
#define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
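
/*
 * Illustrative sketch (not part of the upstream header): how
 * 'watchdog_enabled' encodes the run state with the bits above. The helper
 * is hypothetical and only demonstrates the encoding.
 */
#if 0	/* example only, never compiled */
static inline bool example_both_watchdogs_enabled(void)
{
	unsigned long want = WATCHDOG_HARDLOCKUP_ENABLED |
			     WATCHDOG_SOFTOCKUP_ENABLED;

	return (watchdog_enabled & want) == want;
}
#endif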

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/* Sparc64 has a special implementation that is always enabled. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
void arch_touch_nmi_watchdog(void);
#else
static inline void arch_touch_nmi_watchdog(void) { }
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
void watchdog_hardlockup_touch_cpu(unsigned int cpu);
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_cleanup(void);
extern void hardlockup_config_perf_event(const char *str);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
static inline void hardlockup_config_perf_event(const char *str) { }
#endif

void watchdog_hardlockup_stop(void);
void watchdog_hardlockup_start(void);
int watchdog_hardlockup_probe(void);
void watchdog_hardlockup_enable(unsigned int cpu);
void watchdog_hardlockup_disable(unsigned int cpu);

void lockup_detector_reconfigure(void);

#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
#else
static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
#endif

/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
 *
 * If we support detecting hardlockups, touch_nmi_watchdog() may be
 * used to pet the watchdog (reset the timeout) - for code which
 * intentionally disables interrupts for a long time. This call is stateless.
 *
 * Though this function has "nmi" in the name, the hardlockup watchdog might
 * not be backed by NMIs. This function will likely be renamed to
 * touch_hardlockup_watchdog() in the future.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Pass on to the hardlockup detector selected via CONFIG_. Note that
	 * the hardlockup detector may not be arch-specific nor using NMIs
	 * and the arch_touch_nmi_watchdog() function will likely be renamed
	 * in the future.
	 */
	arch_touch_nmi_watchdog();

	touch_softlockup_watchdog();
}
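
/*
 * Illustrative sketch (not part of the upstream header): petting the
 * watchdogs from a long, interrupt-disabled polling loop. The MMIO register
 * and its ready bit are hypothetical (readl()/udelay() would come from
 * linux/io.h and linux/delay.h); only the touch_nmi_watchdog() call site is
 * the point of the example.
 */
#if 0	/* example only, never compiled */
static void example_poll_until_ready(void __iomem *status_reg)
{
	/* The caller runs with interrupts disabled for the whole wait. */
	while (!(readl(status_reg) & BIT(0))) {
		udelay(10);
		/* Keep both lockup detectors from firing during the wait. */
		touch_nmi_watchdog();
	}
}
#endif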

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
	return true;
}

static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, -1);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   int exclude_cpu,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
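
/*
 * Illustrative sketch (not part of the upstream header): asking every other
 * CPU for a backtrace and falling back to dumping only the local stack when
 * no architecture hook is available. The call site is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_dump_other_cpus(void)
{
	if (!trigger_allbutcpu_cpu_backtrace(raw_smp_processor_id()))
		dump_stack();	/* fall back: current CPU only */
}
#endif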

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
bool arch_perf_nmi_is_available(void);
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
	defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#ifdef CONFIG_NMI_CHECK_CPU
void nmi_backtrace_stall_snap(const struct cpumask *btp);
void nmi_backtrace_stall_check(const struct cpumask *btp);
#else
static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
#endif

#endif