/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/* */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft-lockup threshold is twice the
 * hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
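
/*
 * Worked example (illustrative only, not part of the code): shifting by 30
 * approximates a divide by NSEC_PER_SEC. For a clock value of 10 seconds:
 *
 *	u64 ns = 10 * NSEC_PER_SEC;	-- 10,000,000,000 ns
 *	ns >> 30;			-- 9  (10^10 / 2^30 ~= 9.31)
 *	ns / NSEC_PER_SEC;		-- 10 (exact, but needs a 64-bit divide)
 *
 * The ~7% error is harmless at a granularity of whole seconds.
 */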

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
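
/*
 * Worked example with the defaults above (illustrative): watchdog_thresh
 * of 10 gives a soft-lockup threshold of 20 seconds, so
 *
 *	sample_period = 20 * (NSEC_PER_SEC / 5) = 4,000,000,000 ns = 4 s
 *
 * i.e. the hrtimer fires five times within the 20 s soft-lockup window
 * and two to three times within the 10 s hard-lockup window.
 */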

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless. A 0 racing with a real timestamp is
	 * harmless: it only means the softlockup check starts one cycle
	 * later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled. If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	__raw_get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
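
/*
 * Illustrative timeline of the hard-lockup check above (counts invented):
 * the perf NMI fires roughly once per watchdog_thresh seconds while the
 * hrtimer fires every sample_period, so the count normally advances
 * between two NMIs:
 *
 *	NMI #1: hrtimer_interrupts == 40, save 40
 *	NMI #2: hrtimer_interrupts == 42 -> progress, no lockup
 *	NMI #3: hrtimer_interrupts == 42 -> interrupts stopped, hard lockup
 */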

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
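
/*
 * The .sample_period above is filled in at enable time via
 * hw_nmi_get_sample_period(), roughly the cycle-count equivalent of
 * watchdog_thresh seconds. A sketch of the idea, mirroring the x86
 * helper (the exact implementation is arch-specific):
 *
 *	u64 hw_nmi_get_sample_period(int watchdog_thresh)
 *	{
 *		return (u64)cpu_khz * 1000 * watchdog_thresh;
 *	}
 */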

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt is
	 * incrementing. The timer interrupt should have fired multiple
	 * times before this perf counter overflowed. If it hasn't, the
	 * cpu is likely stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to indicate it
	 * is getting cpu time. If it hasn't, some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	/*
	 * Make sure the perf event counter adapts to the new sampling
	 * period. Updating the sampling period directly would be much
	 * nicer, but we do not have an API for that yet, so use the big
	 * hammer and recreate the event.
	 * The hrtimer will adopt the new period on its next tick, but
	 * that might already be too late, so restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	preempt_disable();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	preempt_enable();
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	static DEFINE_MUTEX(watchdog_proc_mutex);

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * The watchdog threads must not be started if the watchdog has
	 * been disabled by the user. The 'watchdog_running' check in
	 * watchdog_*_all_cpus() takes care of this.
	 */
	if (watchdog_user_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
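
/*
 * Example usage from userspace (illustrative); both knobs are routed
 * through proc_dowatchdog() above:
 *
 *	# echo 0 > /proc/sys/kernel/nmi_watchdog      (disable the watchdog)
 *	# echo 20 > /proc/sys/kernel/watchdog_thresh  (20 s hard, 40 s soft)
 */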
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

static struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}
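
/*
 * Minimal sketch of an arch override (hypothetical: arch_nmi_timer_arm()
 * and arch_nmi_timer_disarm() are invented names, not a real API). An
 * architecture with its own NMI source would provide strong definitions
 * of the weak hooks above, e.g.:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		arch_nmi_timer_arm(cpu, watchdog_thresh);
 *		return 0;
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		arch_nmi_timer_disarm(cpu);
 *	}
 */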

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

#define SOFTLOCKUP_RESET	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
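
/*
 * Example (illustrative): booting with "watchdog_thresh=20" on the kernel
 * command line selects a 20 s hard-lockup and, via get_softlockup_thresh(),
 * a 40 s soft-lockup threshold before any sysctl is consulted.
 */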

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft-lockup threshold is twice the
 * hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	__touch_watchdog();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == SOFTLOCKUP_RESET) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to indicate it
	 * is getting cpu time. If it hasn't, some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first, so that a large delay between
	 * disabling the timer and disabling the perf event cannot cause
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty. When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
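
/*
 * Example usage (illustrative) of the sysctl knobs handled above:
 *
 *	# echo 0 > /proc/sys/kernel/nmi_watchdog       (hard detector off)
 *	# echo 1 > /proc/sys/kernel/soft_watchdog      (soft detector on)
 *	# echo 2-5 > /proc/sys/kernel/watchdog_cpumask (restrict to CPUs 2-5)
 */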
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}