1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Detect hard and soft lockups on a system
4 *
5 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
6 *
7 * Note: Most of this code is borrowed heavily from the original softlockup
8 * detector, so thanks to Ingo for the initial implementation.
9 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
10 * to those contributors as well.
11 */
12
13#define pr_fmt(fmt) "watchdog: " fmt
14
15#include <linux/mm.h>
16#include <linux/cpu.h>
17#include <linux/nmi.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/sysctl.h>
21#include <linux/tick.h>
22#include <linux/sched/clock.h>
23#include <linux/sched/debug.h>
24#include <linux/sched/isolation.h>
25#include <linux/stop_machine.h>
26
27#include <asm/irq_regs.h>
28#include <linux/kvm_para.h>
29
30static DEFINE_MUTEX(watchdog_mutex);
31
32#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
33# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
34# define NMI_WATCHDOG_DEFAULT 1
35#else
36# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED)
37# define NMI_WATCHDOG_DEFAULT 0
38#endif
39
40unsigned long __read_mostly watchdog_enabled;
41int __read_mostly watchdog_user_enabled = 1;
42int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
43int __read_mostly soft_watchdog_user_enabled = 1;
44int __read_mostly watchdog_thresh = 10;
45static int __read_mostly nmi_watchdog_available;
46
47static struct cpumask watchdog_allowed_mask __read_mostly;
48
49struct cpumask watchdog_cpumask __read_mostly;
50unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
51
52#ifdef CONFIG_HARDLOCKUP_DETECTOR
53/*
54 * Should we panic when a soft-lockup or hard-lockup occurs:
55 */
56unsigned int __read_mostly hardlockup_panic =
57 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
58/*
59 * We may not want to enable hard lockup detection by default in all cases,
60 * for example when running the kernel as a guest on a hypervisor. In these
61 * cases this function can be called to disable hard lockup detection. This
62 * function should only be executed once by the boot processor before the
63 * kernel command line parameters are parsed, because otherwise it is not
64 * possible to override this in hardlockup_panic_setup().
65 */
66void __init hardlockup_detector_disable(void)
67{
68 nmi_watchdog_user_enabled = 0;
69}
70
71static int __init hardlockup_panic_setup(char *str)
72{
73 if (!strncmp(str, "panic", 5))
74 hardlockup_panic = 1;
75 else if (!strncmp(str, "nopanic", 7))
76 hardlockup_panic = 0;
77 else if (!strncmp(str, "0", 1))
78 nmi_watchdog_user_enabled = 0;
79 else if (!strncmp(str, "1", 1))
80 nmi_watchdog_user_enabled = 1;
81 return 1;
82}
83__setup("nmi_watchdog=", hardlockup_panic_setup);
84
85# ifdef CONFIG_SMP
86int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
87
88static int __init hardlockup_all_cpu_backtrace_setup(char *str)
89{
90 sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
91 return 1;
92}
93__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
94# endif /* CONFIG_SMP */
95#endif /* CONFIG_HARDLOCKUP_DETECTOR */
96
97/*
98 * These functions can be overridden if an architecture implements its
99 * own hardlockup detector.
100 *
101 * watchdog_nmi_enable/disable can be implemented to start and stop the
102 * hardlockup detector when the softlockup watchdog threads start and stop.
103 * The arch must select the SOFTLOCKUP_DETECTOR Kconfig.
104 */
105int __weak watchdog_nmi_enable(unsigned int cpu)
106{
107 hardlockup_detector_perf_enable();
108 return 0;
109}
110
111void __weak watchdog_nmi_disable(unsigned int cpu)
112{
113 hardlockup_detector_perf_disable();
114}
115
116/* Return 0 if an NMI watchdog is available, an error code otherwise */
117int __weak __init watchdog_nmi_probe(void)
118{
119 return hardlockup_detector_perf_init();
120}
121
122/**
123 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
124 *
125 * The reconfiguration steps are:
126 * watchdog_nmi_stop();
127 * update_variables();
128 * watchdog_nmi_start();
129 */
130void __weak watchdog_nmi_stop(void) { }
131
132/**
133 * watchdog_nmi_start - Start the watchdog after reconfiguration
134 *
135 * Counterpart to watchdog_nmi_stop().
136 *
137 * The following variables have been updated in update_variables() and
138 * contain the currently valid configuration:
139 * - watchdog_enabled
140 * - watchdog_thresh
141 * - watchdog_cpumask
142 */
143void __weak watchdog_nmi_start(void) { }
144
145/**
146 * lockup_detector_update_enable - Update the sysctl enable bit
147 *
148 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
149 * can't race with watchdog_nmi_disable().
150 */
151static void lockup_detector_update_enable(void)
152{
153 watchdog_enabled = 0;
154 if (!watchdog_user_enabled)
155 return;
156 if (nmi_watchdog_available && nmi_watchdog_user_enabled)
157 watchdog_enabled |= NMI_WATCHDOG_ENABLED;
158 if (soft_watchdog_user_enabled)
159 watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
160}
161
162#ifdef CONFIG_SOFTLOCKUP_DETECTOR
163
164/* Global variables, exported for sysctl */
165unsigned int __read_mostly softlockup_panic =
166 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
167
168static bool softlockup_initialized __read_mostly;
169static u64 __read_mostly sample_period;
170
171static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
172static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
173static DEFINE_PER_CPU(bool, softlockup_touch_sync);
174static DEFINE_PER_CPU(bool, soft_watchdog_warn);
175static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
176static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
177static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
178static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
179static unsigned long soft_lockup_nmi_warn;
180
181static int __init softlockup_panic_setup(char *str)
182{
183 softlockup_panic = simple_strtoul(str, NULL, 0);
184 return 1;
185}
186__setup("softlockup_panic=", softlockup_panic_setup);
187
188static int __init nowatchdog_setup(char *str)
189{
190 watchdog_user_enabled = 0;
191 return 1;
192}
193__setup("nowatchdog", nowatchdog_setup);
194
195static int __init nosoftlockup_setup(char *str)
196{
197 soft_watchdog_user_enabled = 0;
198 return 1;
199}
200__setup("nosoftlockup", nosoftlockup_setup);
201
202static int __init watchdog_thresh_setup(char *str)
203{
204 get_option(&str, &watchdog_thresh);
205 return 1;
206}
207__setup("watchdog_thresh=", watchdog_thresh_setup);
208
209#ifdef CONFIG_SMP
210int __read_mostly sysctl_softlockup_all_cpu_backtrace;
211
212static int __init softlockup_all_cpu_backtrace_setup(char *str)
213{
214 sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
215 return 1;
216}
217__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
218#endif
219
220static void __lockup_detector_cleanup(void);
221
222/*
223 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
224 * lockups can have false positives under extreme conditions, so we generally
225 * want a higher threshold for soft lockups than for hard lockups. We couple
226 * the two thresholds with a fixed factor: the soft-lockup threshold is twice
227 * the hard-lockup threshold.
228 */
229static int get_softlockup_thresh(void)
230{
231 return watchdog_thresh * 2;
232}
233
234/*
235 * Returns seconds, approximately. We don't need nanosecond
236 * resolution, and we don't need to waste time with a big divide when
237 * 2^30ns == 1.074s.
238 */
239static unsigned long get_timestamp(void)
240{
241 return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
242}
243
244static void set_sample_period(void)
245{
246 /*
247 * convert watchdog_thresh from seconds to ns
248 * the divide by 5 is to give hrtimer several chances (two
249 * or three with the current relation between the soft
250 * and hard thresholds) to increment before the
251 * hardlockup detector generates a warning
252 */
253 sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
254 watchdog_update_hrtimer_threshold(sample_period);
255}
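/*
 * A worked example with the default watchdog_thresh of 10 seconds:
 * get_softlockup_thresh() returns 20, so sample_period becomes
 * 20 * NSEC_PER_SEC / 5 = 4 seconds. The hrtimer then fires every
 * 4 seconds and a soft lockup is reported after roughly 20 seconds
 * without a successful touch of the watchdog.
 */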
256
257/* Commands for resetting the watchdog */
258static void __touch_watchdog(void)
259{
260 __this_cpu_write(watchdog_touch_ts, get_timestamp());
261}
262
263/**
264 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
265 *
266 * Call when the scheduler may have stalled for legitimate reasons
267 * preventing the watchdog task from executing - e.g. the scheduler
268 * entering idle state. This should only be used for scheduler events.
269 * Use touch_softlockup_watchdog() for everything else.
270 */
271notrace void touch_softlockup_watchdog_sched(void)
272{
273 /*
274 * Preemption can be enabled. It doesn't matter which CPU's timestamp
275 * gets zeroed here, so use the raw_ operation.
276 */
277 raw_cpu_write(watchdog_touch_ts, 0);
278}
279
280notrace void touch_softlockup_watchdog(void)
281{
282 touch_softlockup_watchdog_sched();
283 wq_watchdog_touch(raw_smp_processor_id());
284}
285EXPORT_SYMBOL(touch_softlockup_watchdog);
286
287void touch_all_softlockup_watchdogs(void)
288{
289 int cpu;
290
291 /*
292 * watchdog_mutex cannot be taken here, as this might be called
293 * from (soft)interrupt context, so the access to
294 * watchdog_allowed_mask might race with a concurrent update.
295 *
296 * The watchdog time stamp can race against a concurrent real
297 * update as well, the only side effect might be a cycle delay for
298 * the softlockup check.
299 */
300 for_each_cpu(cpu, &watchdog_allowed_mask)
301 per_cpu(watchdog_touch_ts, cpu) = 0;
302 wq_watchdog_touch(-1);
303}
304
305void touch_softlockup_watchdog_sync(void)
306{
307 __this_cpu_write(softlockup_touch_sync, true);
308 __this_cpu_write(watchdog_touch_ts, 0);
309}
310
311static int is_softlockup(unsigned long touch_ts)
312{
313 unsigned long now = get_timestamp();
314
315	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
316 /* Warn about unreasonable delays. */
317 if (time_after(now, touch_ts + get_softlockup_thresh()))
318 return now - touch_ts;
319 }
320 return 0;
321}
322
323/* watchdog detector functions */
324bool is_hardlockup(void)
325{
326 unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
327
328 if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
329 return true;
330
331 __this_cpu_write(hrtimer_interrupts_saved, hrint);
332 return false;
333}
334
335static void watchdog_interrupt_count(void)
336{
337 __this_cpu_inc(hrtimer_interrupts);
338}
339
340static DEFINE_PER_CPU(struct completion, softlockup_completion);
341static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
342
343/*
344 * The watchdog thread function - touches the timestamp.
345 *
346 * It only runs once every sample_period seconds (4 seconds by
347 * default) to reset the softlockup timestamp. If this gets delayed
348 * for more than 2*watchdog_thresh seconds then the debug-printout
349 * triggers in watchdog_timer_fn().
350 */
351static int softlockup_fn(void *data)
352{
353 __this_cpu_write(soft_lockup_hrtimer_cnt,
354 __this_cpu_read(hrtimer_interrupts));
355 __touch_watchdog();
356 complete(this_cpu_ptr(&softlockup_completion));
357
358 return 0;
359}
360
361/* watchdog kicker functions */
362static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
363{
364 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
365 struct pt_regs *regs = get_irq_regs();
366 int duration;
367 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
368
369 if (!watchdog_enabled)
370 return HRTIMER_NORESTART;
371
372 /* kick the hardlockup detector */
373 watchdog_interrupt_count();
374
375 /* kick the softlockup detector */
376 if (completion_done(this_cpu_ptr(&softlockup_completion))) {
377 reinit_completion(this_cpu_ptr(&softlockup_completion));
378 stop_one_cpu_nowait(smp_processor_id(),
379 softlockup_fn, NULL,
380 this_cpu_ptr(&softlockup_stop_work));
381 }
382
383 /* .. and repeat */
384 hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
385
386 if (touch_ts == 0) {
387 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
388 /*
389 * If the time stamp was touched atomically
390 * make sure the scheduler tick is up to date.
391 */
392 __this_cpu_write(softlockup_touch_sync, false);
393 sched_clock_tick();
394 }
395
396 /* Clear the guest paused flag on watchdog reset */
397 kvm_check_and_clear_guest_paused();
398 __touch_watchdog();
399 return HRTIMER_RESTART;
400 }
401
402	/*
403	 * Check for a softlockup. This is done by making sure a high
404	 * priority task is being scheduled. The task touches the watchdog
405	 * to indicate it is getting cpu time. If it hasn't, then this is a
406	 * good indication some task is hogging the cpu.
407 */
408 duration = is_softlockup(touch_ts);
409 if (unlikely(duration)) {
410 /*
411	 * If a virtual machine is stopped by the host it can look to
412	 * the watchdog like a soft lockup. Check whether the host
413	 * stopped the VM before we issue the warning.
414 */
415 if (kvm_check_and_clear_guest_paused())
416 return HRTIMER_RESTART;
417
418 /* only warn once */
419 if (__this_cpu_read(soft_watchdog_warn) == true) {
420 /*
421 * When multiple processes are causing softlockups the
422 * softlockup detector only warns on the first one
423 * because the code relies on a full quiet cycle to
424 * re-arm. The second process prevents the quiet cycle
425 * and never gets reported. Use task pointers to detect
426 * this.
427 */
428 if (__this_cpu_read(softlockup_task_ptr_saved) !=
429 current) {
430 __this_cpu_write(soft_watchdog_warn, false);
431 __touch_watchdog();
432 }
433 return HRTIMER_RESTART;
434 }
435
436 if (softlockup_all_cpu_backtrace) {
437 /* Prevent multiple soft-lockup reports if one cpu is already
438 * engaged in dumping cpu back traces
439 */
440 if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
441 /* Someone else will report us. Let's give up */
442 __this_cpu_write(soft_watchdog_warn, true);
443 return HRTIMER_RESTART;
444 }
445 }
446
447 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
448 smp_processor_id(), duration,
449 current->comm, task_pid_nr(current));
450 __this_cpu_write(softlockup_task_ptr_saved, current);
451 print_modules();
452 print_irqtrace_events(current);
453 if (regs)
454 show_regs(regs);
455 else
456 dump_stack();
457
458 if (softlockup_all_cpu_backtrace) {
459 /* Avoid generating two back traces for current
460 * given that one is already made above
461 */
462 trigger_allbutself_cpu_backtrace();
463
464 clear_bit(0, &soft_lockup_nmi_warn);
465 /* Barrier to sync with other cpus */
466 smp_mb__after_atomic();
467 }
468
469 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
470 if (softlockup_panic)
471 panic("softlockup: hung tasks");
472 __this_cpu_write(soft_watchdog_warn, true);
473 } else
474 __this_cpu_write(soft_watchdog_warn, false);
475
476 return HRTIMER_RESTART;
477}
478
479static void watchdog_enable(unsigned int cpu)
480{
481 struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
482 struct completion *done = this_cpu_ptr(&softlockup_completion);
483
484 WARN_ON_ONCE(cpu != smp_processor_id());
485
486 init_completion(done);
487 complete(done);
488
489 /*
490 * Start the timer first to prevent the NMI watchdog triggering
491 * before the timer has a chance to fire.
492 */
493 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
494 hrtimer->function = watchdog_timer_fn;
495 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
496 HRTIMER_MODE_REL_PINNED_HARD);
497
498 /* Initialize timestamp */
499 __touch_watchdog();
500 /* Enable the perf event */
501 if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
502 watchdog_nmi_enable(cpu);
503}
504
505static void watchdog_disable(unsigned int cpu)
506{
507 struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
508
509 WARN_ON_ONCE(cpu != smp_processor_id());
510
511 /*
512	 * Disable the perf event first. That prevents a false positive in
513	 * the perf NMI if there is a large delay between disabling the
514	 * timer and disabling the perf event.
515 */
516 watchdog_nmi_disable(cpu);
517 hrtimer_cancel(hrtimer);
518 wait_for_completion(this_cpu_ptr(&softlockup_completion));
519}
520
521static int softlockup_stop_fn(void *data)
522{
523 watchdog_disable(smp_processor_id());
524 return 0;
525}
526
527static void softlockup_stop_all(void)
528{
529 int cpu;
530
531 if (!softlockup_initialized)
532 return;
533
534 for_each_cpu(cpu, &watchdog_allowed_mask)
535 smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
536
537 cpumask_clear(&watchdog_allowed_mask);
538}
539
540static int softlockup_start_fn(void *data)
541{
542 watchdog_enable(smp_processor_id());
543 return 0;
544}
545
546static void softlockup_start_all(void)
547{
548 int cpu;
549
550 cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
551 for_each_cpu(cpu, &watchdog_allowed_mask)
552 smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
553}
554
555int lockup_detector_online_cpu(unsigned int cpu)
556{
557 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
558 watchdog_enable(cpu);
559 return 0;
560}
561
562int lockup_detector_offline_cpu(unsigned int cpu)
563{
564 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
565 watchdog_disable(cpu);
566 return 0;
567}
568
569static void lockup_detector_reconfigure(void)
570{
571 cpus_read_lock();
572 watchdog_nmi_stop();
573
574 softlockup_stop_all();
575 set_sample_period();
576 lockup_detector_update_enable();
577 if (watchdog_enabled && watchdog_thresh)
578 softlockup_start_all();
579
580 watchdog_nmi_start();
581 cpus_read_unlock();
582 /*
583 * Must be called outside the cpus locked section to prevent
584 * recursive locking in the perf code.
585 */
586 __lockup_detector_cleanup();
587}
588
589/*
590 * Create the watchdog thread infrastructure and configure the detector(s).
591 *
592 * The threads are not unparked as watchdog_allowed_mask is empty. When
593 * the threads are successfully initialized, take the proper locks and
594 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
595 */
596static __init void lockup_detector_setup(void)
597{
598 /*
599 * If sysctl is off and watchdog got disabled on the command line,
600 * nothing to do here.
601 */
602 lockup_detector_update_enable();
603
604 if (!IS_ENABLED(CONFIG_SYSCTL) &&
605 !(watchdog_enabled && watchdog_thresh))
606 return;
607
608 mutex_lock(&watchdog_mutex);
609 lockup_detector_reconfigure();
610 softlockup_initialized = true;
611 mutex_unlock(&watchdog_mutex);
612}
613
614#else /* CONFIG_SOFTLOCKUP_DETECTOR */
615static void lockup_detector_reconfigure(void)
616{
617 cpus_read_lock();
618 watchdog_nmi_stop();
619 lockup_detector_update_enable();
620 watchdog_nmi_start();
621 cpus_read_unlock();
622}
623static inline void lockup_detector_setup(void)
624{
625 lockup_detector_reconfigure();
626}
627#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
628
629static void __lockup_detector_cleanup(void)
630{
631 lockdep_assert_held(&watchdog_mutex);
632 hardlockup_detector_perf_cleanup();
633}
634
635/**
636 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
637 *
638 * Caller must not hold the cpu hotplug rwsem.
639 */
640void lockup_detector_cleanup(void)
641{
642 mutex_lock(&watchdog_mutex);
643 __lockup_detector_cleanup();
644 mutex_unlock(&watchdog_mutex);
645}
646
647/**
648 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
649 *
650 * Special interface for parisc. It prevents lockup detector warnings from
651 * the default pm_poweroff() function which busy loops forever.
652 */
653void lockup_detector_soft_poweroff(void)
654{
655 watchdog_enabled = 0;
656}
657
658#ifdef CONFIG_SYSCTL
659
660/* Propagate any changes to the watchdog threads */
661static void proc_watchdog_update(void)
662{
663 /* Remove impossible cpus to keep sysctl output clean. */
664 cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
665 lockup_detector_reconfigure();
666}
667
668/*
669 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
670 *
671 * caller | table->data points to | 'which'
672 * -------------------|----------------------------|--------------------------
673 * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
674 * | | SOFT_WATCHDOG_ENABLED
675 * -------------------|----------------------------|--------------------------
676 * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED
677 * -------------------|----------------------------|--------------------------
678 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
679 */
680static int proc_watchdog_common(int which, struct ctl_table *table, int write,
681 void __user *buffer, size_t *lenp, loff_t *ppos)
682{
683 int err, old, *param = table->data;
684
685 mutex_lock(&watchdog_mutex);
686
687 if (!write) {
688 /*
689 * On read synchronize the userspace interface. This is a
690 * racy snapshot.
691 */
692 *param = (watchdog_enabled & which) != 0;
693 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
694 } else {
695 old = READ_ONCE(*param);
696 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
697 if (!err && old != READ_ONCE(*param))
698 proc_watchdog_update();
699 }
700 mutex_unlock(&watchdog_mutex);
701 return err;
702}
703
704/*
705 * /proc/sys/kernel/watchdog
706 */
707int proc_watchdog(struct ctl_table *table, int write,
708 void __user *buffer, size_t *lenp, loff_t *ppos)
709{
710 return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
711 table, write, buffer, lenp, ppos);
712}
713
714/*
715 * /proc/sys/kernel/nmi_watchdog
716 */
717int proc_nmi_watchdog(struct ctl_table *table, int write,
718 void __user *buffer, size_t *lenp, loff_t *ppos)
719{
720 if (!nmi_watchdog_available && write)
721 return -ENOTSUPP;
722 return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
723 table, write, buffer, lenp, ppos);
724}
725
726/*
727 * /proc/sys/kernel/soft_watchdog
728 */
729int proc_soft_watchdog(struct ctl_table *table, int write,
730 void __user *buffer, size_t *lenp, loff_t *ppos)
731{
732 return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
733 table, write, buffer, lenp, ppos);
734}
735
736/*
737 * /proc/sys/kernel/watchdog_thresh
738 */
739int proc_watchdog_thresh(struct ctl_table *table, int write,
740 void __user *buffer, size_t *lenp, loff_t *ppos)
741{
742 int err, old;
743
744 mutex_lock(&watchdog_mutex);
745
746 old = READ_ONCE(watchdog_thresh);
747 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
748
749 if (!err && write && old != READ_ONCE(watchdog_thresh))
750 proc_watchdog_update();
751
752 mutex_unlock(&watchdog_mutex);
753 return err;
754}
755
756/*
757 * The cpumask is the mask of possible cpus that the watchdog can run
758 * on, not the mask of cpus it is actually running on. This allows the
759 * user to specify a mask that will include cpus that have not yet
760 * been brought online, if desired.
761 */
762int proc_watchdog_cpumask(struct ctl_table *table, int write,
763 void __user *buffer, size_t *lenp, loff_t *ppos)
764{
765 int err;
766
767 mutex_lock(&watchdog_mutex);
768
769 err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
770 if (!err && write)
771 proc_watchdog_update();
772
773 mutex_unlock(&watchdog_mutex);
774 return err;
775}
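/*
 * Illustrative usage from userspace, assuming the cpulist format accepted
 * by proc_do_large_bitmap(): restrict the watchdog to CPUs 0-3 with
 *
 *   echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 */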
776#endif /* CONFIG_SYSCTL */
777
778void __init lockup_detector_init(void)
779{
780 if (tick_nohz_full_enabled())
781 pr_info("Disabling watchdog on nohz_full cores by default\n");
782
783 cpumask_copy(&watchdog_cpumask,
784 housekeeping_cpumask(HK_FLAG_TIMER));
785
786 if (!watchdog_nmi_probe())
787 nmi_watchdog_available = true;
788 lockup_detector_setup();
789}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Detect hard and soft lockups on a system
4 *
5 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
6 *
7 * Note: Most of this code is borrowed heavily from the original softlockup
8 * detector, so thanks to Ingo for the initial implementation.
9 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
10 * to those contributors as well.
11 */
12
13#define pr_fmt(fmt) "watchdog: " fmt
14
15#include <linux/cpu.h>
16#include <linux/init.h>
17#include <linux/irq.h>
18#include <linux/irqdesc.h>
19#include <linux/kernel_stat.h>
20#include <linux/kvm_para.h>
21#include <linux/math64.h>
22#include <linux/mm.h>
23#include <linux/module.h>
24#include <linux/nmi.h>
25#include <linux/stop_machine.h>
26#include <linux/sysctl.h>
27#include <linux/tick.h>
28
29#include <linux/sched/clock.h>
30#include <linux/sched/debug.h>
31#include <linux/sched/isolation.h>
32
33#include <asm/irq_regs.h>
34
35static DEFINE_MUTEX(watchdog_mutex);
36
37#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
38# define WATCHDOG_HARDLOCKUP_DEFAULT 1
39#else
40# define WATCHDOG_HARDLOCKUP_DEFAULT 0
41#endif
42
43#define NUM_SAMPLE_PERIODS 5
44
45unsigned long __read_mostly watchdog_enabled;
46int __read_mostly watchdog_user_enabled = 1;
47static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
48static int __read_mostly watchdog_softlockup_user_enabled = 1;
49int __read_mostly watchdog_thresh = 10;
50static int __read_mostly watchdog_hardlockup_available;
51
52struct cpumask watchdog_cpumask __read_mostly;
53unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
54
55#ifdef CONFIG_HARDLOCKUP_DETECTOR
56
57# ifdef CONFIG_SMP
58int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
59# endif /* CONFIG_SMP */
60
61/*
62 * Should we panic when a soft-lockup or hard-lockup occurs:
63 */
64unsigned int __read_mostly hardlockup_panic =
65 IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
66/*
67 * We may not want to enable hard lockup detection by default in all cases,
68 * for example when running the kernel as a guest on a hypervisor. In these
69 * cases this function can be called to disable hard lockup detection. This
70 * function should only be executed once by the boot processor before the
71 * kernel command line parameters are parsed, because otherwise it is not
72 * possible to override this in hardlockup_panic_setup().
73 */
74void __init hardlockup_detector_disable(void)
75{
76 watchdog_hardlockup_user_enabled = 0;
77}
78
79static int __init hardlockup_panic_setup(char *str)
80{
81next:
82 if (!strncmp(str, "panic", 5))
83 hardlockup_panic = 1;
84 else if (!strncmp(str, "nopanic", 7))
85 hardlockup_panic = 0;
86 else if (!strncmp(str, "0", 1))
87 watchdog_hardlockup_user_enabled = 0;
88 else if (!strncmp(str, "1", 1))
89 watchdog_hardlockup_user_enabled = 1;
90 else if (!strncmp(str, "r", 1))
91 hardlockup_config_perf_event(str + 1);
92 while (*(str++)) {
93 if (*str == ',') {
94 str++;
95 goto next;
96 }
97 }
98 return 1;
99}
100__setup("nmi_watchdog=", hardlockup_panic_setup);
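/*
 * The loop above walks a comma-separated option list, so for example booting
 * with "nmi_watchdog=1,panic" both enables the hardlockup detector and makes
 * a detected hard lockup panic the machine, while "nmi_watchdog=0" disables
 * it. These values are illustrative; the "r" token passes the rest of that
 * token to hardlockup_config_perf_event() to select the perf event used.
 */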
101
102#endif /* CONFIG_HARDLOCKUP_DETECTOR */
103
104#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
105
106static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
107static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
108static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
109static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
110static unsigned long hard_lockup_nmi_warn;
111
112notrace void arch_touch_nmi_watchdog(void)
113{
114 /*
115 * Using __raw here because some code paths have
116 * preemption enabled. If preemption is enabled
117 * then interrupts should be enabled too, in which
118 * case we shouldn't have to worry about the watchdog
119 * going off.
120 */
121 raw_cpu_write(watchdog_hardlockup_touched, true);
122}
123EXPORT_SYMBOL(arch_touch_nmi_watchdog);
124
125void watchdog_hardlockup_touch_cpu(unsigned int cpu)
126{
127 per_cpu(watchdog_hardlockup_touched, cpu) = true;
128}
129
130static bool is_hardlockup(unsigned int cpu)
131{
132 int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
133
134 if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
135 return true;
136
137 /*
138 * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
139 * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
140 * written/read by a single CPU.
141 */
142 per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
143
144 return false;
145}
146
147static void watchdog_hardlockup_kick(void)
148{
149 int new_interrupts;
150
151 new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
152 watchdog_buddy_check_hardlockup(new_interrupts);
153}
154
155void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
156{
157 if (per_cpu(watchdog_hardlockup_touched, cpu)) {
158 per_cpu(watchdog_hardlockup_touched, cpu) = false;
159 return;
160 }
161
162 /*
163 * Check for a hardlockup by making sure the CPU's timer
164 * interrupt is incrementing. The timer interrupt should have
165	 * fired multiple times before we overflowed. If it hasn't,
166	 * then this is a good indication the cpu is stuck.
167 */
168 if (is_hardlockup(cpu)) {
169 unsigned int this_cpu = smp_processor_id();
170 unsigned long flags;
171
172 /* Only print hardlockups once. */
173 if (per_cpu(watchdog_hardlockup_warned, cpu))
174 return;
175
176 /*
177 * Prevent multiple hard-lockup reports if one cpu is already
178 * engaged in dumping all cpu back traces.
179 */
180 if (sysctl_hardlockup_all_cpu_backtrace) {
181 if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
182 return;
183 }
184
185 /*
186 * NOTE: we call printk_cpu_sync_get_irqsave() after printing
187 * the lockup message. While it would be nice to serialize
188 * that printout, we really want to make sure that if some
189 * other CPU somehow locked up while holding the lock associated
190 * with printk_cpu_sync_get_irqsave() that we can still at least
191 * get the message about the lockup out.
192 */
193 pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
194 printk_cpu_sync_get_irqsave(flags);
195
196 print_modules();
197 print_irqtrace_events(current);
198 if (cpu == this_cpu) {
199 if (regs)
200 show_regs(regs);
201 else
202 dump_stack();
203 printk_cpu_sync_put_irqrestore(flags);
204 } else {
205 printk_cpu_sync_put_irqrestore(flags);
206 trigger_single_cpu_backtrace(cpu);
207 }
208
209 if (sysctl_hardlockup_all_cpu_backtrace) {
210 trigger_allbutcpu_cpu_backtrace(cpu);
211 if (!hardlockup_panic)
212 clear_bit_unlock(0, &hard_lockup_nmi_warn);
213 }
214
215 if (hardlockup_panic)
216 nmi_panic(regs, "Hard LOCKUP");
217
218 per_cpu(watchdog_hardlockup_warned, cpu) = true;
219 } else {
220 per_cpu(watchdog_hardlockup_warned, cpu) = false;
221 }
222}
223
224#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
225
226static inline void watchdog_hardlockup_kick(void) { }
227
228#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
229
230/*
231 * These functions can be overridden based on the configured hardlockup detector.
232 *
233 * watchdog_hardlockup_enable/disable can be implemented to start and stop the
234 * hardlockup detector when the softlockup watchdog starts and stops. The
235 * detector must select the SOFTLOCKUP_DETECTOR Kconfig.
236 */
237void __weak watchdog_hardlockup_enable(unsigned int cpu) { }
238
239void __weak watchdog_hardlockup_disable(unsigned int cpu) { }
240
241/*
242 * Watchdog-detector specific API.
243 *
244 * Return 0 when hardlockup watchdog is available, negative value otherwise.
245 * Note that the negative value means that a delayed probe might
246 * succeed later.
247 */
248int __weak __init watchdog_hardlockup_probe(void)
249{
250 return -ENODEV;
251}
252
253/**
254 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
255 *
256 * The reconfiguration steps are:
257 * watchdog_hardlockup_stop();
258 * update_variables();
259 * watchdog_hardlockup_start();
260 */
261void __weak watchdog_hardlockup_stop(void) { }
262
263/**
264 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
265 *
266 * Counterpart to watchdog_hardlockup_stop().
267 *
268 * The following variables have been updated in update_variables() and
269 * contain the currently valid configuration:
270 * - watchdog_enabled
271 * - watchdog_thresh
272 * - watchdog_cpumask
273 */
274void __weak watchdog_hardlockup_start(void) { }
275
276/**
277 * lockup_detector_update_enable - Update the sysctl enable bit
278 *
279 * Caller needs to make sure that the hard watchdogs are off, so this
280 * can't race with watchdog_hardlockup_disable().
281 */
282static void lockup_detector_update_enable(void)
283{
284 watchdog_enabled = 0;
285 if (!watchdog_user_enabled)
286 return;
287 if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
288 watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
289 if (watchdog_softlockup_user_enabled)
290 watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
291}
292
293#ifdef CONFIG_SOFTLOCKUP_DETECTOR
294
295/*
296 * Delay the softlockup report when running known slow code.
297 * It does _not_ affect the timestamp of the last successful reschedule.
298 */
299#define SOFTLOCKUP_DELAY_REPORT ULONG_MAX
300
301#ifdef CONFIG_SMP
302int __read_mostly sysctl_softlockup_all_cpu_backtrace;
303#endif
304
305static struct cpumask watchdog_allowed_mask __read_mostly;
306
307/* Global variables, exported for sysctl */
308unsigned int __read_mostly softlockup_panic =
309 IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
310
311static bool softlockup_initialized __read_mostly;
312static u64 __read_mostly sample_period;
313
314/* Timestamp taken after the last successful reschedule. */
315static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
316/* Timestamp of the last softlockup report. */
317static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
318static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
319static DEFINE_PER_CPU(bool, softlockup_touch_sync);
320static unsigned long soft_lockup_nmi_warn;
321
322static int __init softlockup_panic_setup(char *str)
323{
324 softlockup_panic = simple_strtoul(str, NULL, 0);
325 return 1;
326}
327__setup("softlockup_panic=", softlockup_panic_setup);
328
329static int __init nowatchdog_setup(char *str)
330{
331 watchdog_user_enabled = 0;
332 return 1;
333}
334__setup("nowatchdog", nowatchdog_setup);
335
336static int __init nosoftlockup_setup(char *str)
337{
338 watchdog_softlockup_user_enabled = 0;
339 return 1;
340}
341__setup("nosoftlockup", nosoftlockup_setup);
342
343static int __init watchdog_thresh_setup(char *str)
344{
345 get_option(&str, &watchdog_thresh);
346 return 1;
347}
348__setup("watchdog_thresh=", watchdog_thresh_setup);
349
350static void __lockup_detector_cleanup(void);
351
352#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
353enum stats_per_group {
354 STATS_SYSTEM,
355 STATS_SOFTIRQ,
356 STATS_HARDIRQ,
357 STATS_IDLE,
358 NUM_STATS_PER_GROUP,
359};
360
361static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = {
362 CPUTIME_SYSTEM,
363 CPUTIME_SOFTIRQ,
364 CPUTIME_IRQ,
365 CPUTIME_IDLE,
366};
367
368static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
369static DEFINE_PER_CPU(u8, cpustat_util[NUM_SAMPLE_PERIODS][NUM_STATS_PER_GROUP]);
370static DEFINE_PER_CPU(u8, cpustat_tail);
371
372/*
373 * We don't need nanosecond resolution. A granularity of 16ms is
374 * sufficient for our precision, allowing us to use u16 to store
375 * cpustats, which will roll over roughly every 1000 seconds.
376 * 2^24 ~= 16 * 10^6
377 */
378static u16 get_16bit_precision(u64 data_ns)
379{
380 return data_ns >> 24LL; /* 2^24ns ~= 16.8ms */
381}
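/*
 * For example, one second (10^9 ns) shifted right by 24 gives about 59 units
 * of ~16.8 ms each, and a u16 therefore wraps after about 65536 * 16.8 ms,
 * i.e. roughly 1100 seconds, matching the roll-over noted above.
 */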
382
383static void update_cpustat(void)
384{
385 int i;
386 u8 util;
387 u16 old_stat, new_stat;
388 struct kernel_cpustat kcpustat;
389 u64 *cpustat = kcpustat.cpustat;
390 u8 tail = __this_cpu_read(cpustat_tail);
391 u16 sample_period_16 = get_16bit_precision(sample_period);
392
393 kcpustat_cpu_fetch(&kcpustat, smp_processor_id());
394
395 for (i = 0; i < NUM_STATS_PER_GROUP; i++) {
396 old_stat = __this_cpu_read(cpustat_old[i]);
397 new_stat = get_16bit_precision(cpustat[tracked_stats[i]]);
398 util = DIV_ROUND_UP(100 * (new_stat - old_stat), sample_period_16);
399 __this_cpu_write(cpustat_util[tail][i], util);
400 __this_cpu_write(cpustat_old[i], new_stat);
401 }
402
403 __this_cpu_write(cpustat_tail, (tail + 1) % NUM_SAMPLE_PERIODS);
404}
405
406static void print_cpustat(void)
407{
408 int i, group;
409 u8 tail = __this_cpu_read(cpustat_tail);
410 u64 sample_period_second = sample_period;
411
412 do_div(sample_period_second, NSEC_PER_SEC);
413
414 /*
415	 * Printing the "watchdog:" prefix on every line would be redundant;
416	 * the original alarm message is sufficient to locate these lines in
417	 * the log, so plain printk() is used here instead of pr_crit().
418 */
419 printk(KERN_CRIT "CPU#%d Utilization every %llus during lockup:\n",
420 smp_processor_id(), sample_period_second);
421
422 for (i = 0; i < NUM_SAMPLE_PERIODS; i++) {
423 group = (tail + i) % NUM_SAMPLE_PERIODS;
424 printk(KERN_CRIT "\t#%d: %3u%% system,\t%3u%% softirq,\t"
425 "%3u%% hardirq,\t%3u%% idle\n", i + 1,
426 __this_cpu_read(cpustat_util[group][STATS_SYSTEM]),
427 __this_cpu_read(cpustat_util[group][STATS_SOFTIRQ]),
428 __this_cpu_read(cpustat_util[group][STATS_HARDIRQ]),
429 __this_cpu_read(cpustat_util[group][STATS_IDLE]));
430 }
431}
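/*
 * With the defaults (sample_period of 4 seconds, NUM_SAMPLE_PERIODS of 5)
 * the report produced above looks roughly like this; the utilization
 * numbers are illustrative only:
 *
 *   CPU#2 Utilization every 4s during lockup:
 *     #1:   3% system,   0% softirq,  95% hardirq,   0% idle
 *     ...
 *     #5:   2% system,   1% softirq,  96% hardirq,   0% idle
 */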
432
433#define HARDIRQ_PERCENT_THRESH 50
434#define NUM_HARDIRQ_REPORT 5
435struct irq_counts {
436 int irq;
437 u32 counts;
438};
439
440static DEFINE_PER_CPU(bool, snapshot_taken);
441
442/* Tabulate the most frequent interrupts. */
443static void tabulate_irq_count(struct irq_counts *irq_counts, int irq, u32 counts, int rank)
444{
445 int i;
446 struct irq_counts new_count = {irq, counts};
447
448 for (i = 0; i < rank; i++) {
449 if (counts > irq_counts[i].counts)
450 swap(new_count, irq_counts[i]);
451 }
452}
453
454/*
455 * If the hardirq time exceeds HARDIRQ_PERCENT_THRESH% of the sample_period,
456 * then the cause of the softlockup might be an interrupt storm. In this
457 * case, it would be useful to start interrupt counting.
458 */
459static bool need_counting_irqs(void)
460{
461 u8 util;
462 int tail = __this_cpu_read(cpustat_tail);
463
464	tail = (tail + NUM_SAMPLE_PERIODS - 1) % NUM_SAMPLE_PERIODS;
465 util = __this_cpu_read(cpustat_util[tail][STATS_HARDIRQ]);
466 return util > HARDIRQ_PERCENT_THRESH;
467}
468
469static void start_counting_irqs(void)
470{
471 if (!__this_cpu_read(snapshot_taken)) {
472 kstat_snapshot_irqs();
473 __this_cpu_write(snapshot_taken, true);
474 }
475}
476
477static void stop_counting_irqs(void)
478{
479 __this_cpu_write(snapshot_taken, false);
480}
481
482static void print_irq_counts(void)
483{
484 unsigned int i, count;
485 struct irq_counts irq_counts_sorted[NUM_HARDIRQ_REPORT] = {
486 {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}, {-1, 0}
487 };
488
489 if (__this_cpu_read(snapshot_taken)) {
490 for_each_active_irq(i) {
491 count = kstat_get_irq_since_snapshot(i);
492 tabulate_irq_count(irq_counts_sorted, i, count, NUM_HARDIRQ_REPORT);
493 }
494
495 /*
496		 * Printing the "watchdog:" prefix on every line would be redundant;
497		 * the original alarm message is sufficient to locate these lines in
498		 * the log, so plain printk() is used here instead of pr_crit().
499 */
500 printk(KERN_CRIT "CPU#%d Detect HardIRQ Time exceeds %d%%. Most frequent HardIRQs:\n",
501 smp_processor_id(), HARDIRQ_PERCENT_THRESH);
502
503 for (i = 0; i < NUM_HARDIRQ_REPORT; i++) {
504 if (irq_counts_sorted[i].irq == -1)
505 break;
506
507 printk(KERN_CRIT "\t#%u: %-10u\tirq#%d\n",
508 i + 1, irq_counts_sorted[i].counts,
509 irq_counts_sorted[i].irq);
510 }
511
512 /*
513 * If the hardirq time is less than HARDIRQ_PERCENT_THRESH% in the last
514 * sample_period, then we suspect the interrupt storm might be subsiding.
515 */
516 if (!need_counting_irqs())
517 stop_counting_irqs();
518 }
519}
520
521static void report_cpu_status(void)
522{
523 print_cpustat();
524 print_irq_counts();
525}
526#else
527static inline void update_cpustat(void) { }
528static inline void report_cpu_status(void) { }
529static inline bool need_counting_irqs(void) { return false; }
530static inline void start_counting_irqs(void) { }
531static inline void stop_counting_irqs(void) { }
532#endif
533
534/*
535 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
536 * lockups can have false positives under extreme conditions. So we generally
537 * want a higher threshold for soft lockups than for hard lockups. So we couple
538 * the thresholds with a factor: we make the soft threshold twice the amount of
539 * time the hard threshold is.
540 */
541static int get_softlockup_thresh(void)
542{
543 return watchdog_thresh * 2;
544}
545
546/*
547 * Returns seconds, approximately. We don't need nanosecond
548 * resolution, and we don't need to waste time with a big divide when
549 * 2^30ns == 1.074s.
550 */
551static unsigned long get_timestamp(void)
552{
553 return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
554}
555
556static void set_sample_period(void)
557{
558 /*
559 * convert watchdog_thresh from seconds to ns
560 * the divide by 5 is to give hrtimer several chances (two
561 * or three with the current relation between the soft
562 * and hard thresholds) to increment before the
563 * hardlockup detector generates a warning
564 */
565 sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / NUM_SAMPLE_PERIODS);
566 watchdog_update_hrtimer_threshold(sample_period);
567}
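/*
 * A worked example with the default watchdog_thresh of 10 seconds:
 * get_softlockup_thresh() returns 20, so sample_period becomes
 * 20 * NSEC_PER_SEC / NUM_SAMPLE_PERIODS = 4 seconds. The hrtimer then
 * fires every 4 seconds and a soft lockup is reported after roughly
 * 20 seconds without a successful reschedule.
 */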
568
569static void update_report_ts(void)
570{
571 __this_cpu_write(watchdog_report_ts, get_timestamp());
572}
573
574/* Commands for resetting the watchdog */
575static void update_touch_ts(void)
576{
577 __this_cpu_write(watchdog_touch_ts, get_timestamp());
578 update_report_ts();
579}
580
581/**
582 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
583 *
584 * Call when the scheduler may have stalled for legitimate reasons
585 * preventing the watchdog task from executing - e.g. the scheduler
586 * entering idle state. This should only be used for scheduler events.
587 * Use touch_softlockup_watchdog() for everything else.
588 */
589notrace void touch_softlockup_watchdog_sched(void)
590{
591 /*
592 * Preemption can be enabled. It doesn't matter which CPU's watchdog
593 * report period gets restarted here, so use the raw_ operation.
594 */
595 raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
596}
597
598notrace void touch_softlockup_watchdog(void)
599{
600 touch_softlockup_watchdog_sched();
601 wq_watchdog_touch(raw_smp_processor_id());
602}
603EXPORT_SYMBOL(touch_softlockup_watchdog);
604
605void touch_all_softlockup_watchdogs(void)
606{
607 int cpu;
608
609 /*
610	 * watchdog_mutex cannot be taken here, as this might be called
611	 * from (soft)interrupt context, so the access to
612	 * watchdog_allowed_mask might race with a concurrent update.
613 *
614 * The watchdog time stamp can race against a concurrent real
615 * update as well, the only side effect might be a cycle delay for
616 * the softlockup check.
617 */
618 for_each_cpu(cpu, &watchdog_allowed_mask) {
619 per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
620 wq_watchdog_touch(cpu);
621 }
622}
623
624void touch_softlockup_watchdog_sync(void)
625{
626 __this_cpu_write(softlockup_touch_sync, true);
627 __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
628}
629
630static int is_softlockup(unsigned long touch_ts,
631 unsigned long period_ts,
632 unsigned long now)
633{
634 if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
635 /*
636 * If period_ts has not been updated during a sample_period, then
637 * in the subsequent few sample_periods, period_ts might also not
638 * be updated, which could indicate a potential softlockup. In
639 * this case, if we suspect the cause of the potential softlockup
640 * might be interrupt storm, then we need to count the interrupts
641 * to find which interrupt is storming.
642 */
643 if (time_after_eq(now, period_ts + get_softlockup_thresh() / NUM_SAMPLE_PERIODS) &&
644 need_counting_irqs())
645 start_counting_irqs();
646
647 /*
648 * A poorly behaving BPF scheduler can live-lock the system into
649 * soft lockups. Tell sched_ext to try ejecting the BPF
650 * scheduler when close to a soft lockup.
651 */
652 if (time_after_eq(now, period_ts + get_softlockup_thresh() * 3 / 4))
653 scx_softlockup(now - touch_ts);
654
655 /* Warn about unreasonable delays. */
656 if (time_after(now, period_ts + get_softlockup_thresh()))
657 return now - touch_ts;
658 }
659 return 0;
660}
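/*
 * Timeline with the default watchdog_thresh of 10 (soft threshold 20s,
 * sample_period 4s): if period_ts stops advancing, interrupt counting can
 * start after about 4 seconds (20 / NUM_SAMPLE_PERIODS) when the previous
 * sample showed heavy hardirq time, sched_ext is notified after about
 * 15 seconds (3/4 of 20), and the softlockup warning itself fires once
 * 20 seconds have passed since period_ts.
 */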
661
662/* watchdog detector functions */
663static DEFINE_PER_CPU(struct completion, softlockup_completion);
664static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
665
666/*
667 * The watchdog feed function - touches the timestamp.
668 *
669 * It only runs once per sample_period (4 seconds by
670 * default) to reset the softlockup timestamp. If this gets delayed
671 * for more than 2*watchdog_thresh seconds then the debug-printout
672 * triggers in watchdog_timer_fn().
673 */
674static int softlockup_fn(void *data)
675{
676 update_touch_ts();
677 stop_counting_irqs();
678 complete(this_cpu_ptr(&softlockup_completion));
679
680 return 0;
681}
682
683/* watchdog kicker functions */
684static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
685{
686 unsigned long touch_ts, period_ts, now;
687 struct pt_regs *regs = get_irq_regs();
688 int duration;
689 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
690 unsigned long flags;
691
692 if (!watchdog_enabled)
693 return HRTIMER_NORESTART;
694
695 watchdog_hardlockup_kick();
696
697 /* kick the softlockup detector */
698 if (completion_done(this_cpu_ptr(&softlockup_completion))) {
699 reinit_completion(this_cpu_ptr(&softlockup_completion));
700 stop_one_cpu_nowait(smp_processor_id(),
701 softlockup_fn, NULL,
702 this_cpu_ptr(&softlockup_stop_work));
703 }
704
705 /* .. and repeat */
706 hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
707
708 /*
709	 * Read the current timestamp first. It might become invalid any time
710	 * a virtual machine is stopped by the host or the watchdog
711	 * is touched from NMI.
712 */
713 now = get_timestamp();
714 /*
715 * If a virtual machine is stopped by the host it can look to
716 * the watchdog like a soft lockup. This function touches the watchdog.
717 */
718 kvm_check_and_clear_guest_paused();
719 /*
720 * The stored timestamp is comparable with @now only when not touched.
721 * It might get touched anytime from NMI. Make sure that is_softlockup()
722 * uses the same (valid) value.
723 */
724 period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
725
726 update_cpustat();
727
728 /* Reset the interval when touched by known problematic code. */
729 if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
730 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
731 /*
732 * If the time stamp was touched atomically
733 * make sure the scheduler tick is up to date.
734 */
735 __this_cpu_write(softlockup_touch_sync, false);
736 sched_clock_tick();
737 }
738
739 update_report_ts();
740 return HRTIMER_RESTART;
741 }
742
743 /* Check for a softlockup. */
744 touch_ts = __this_cpu_read(watchdog_touch_ts);
745 duration = is_softlockup(touch_ts, period_ts, now);
746 if (unlikely(duration)) {
747 /*
748 * Prevent multiple soft-lockup reports if one cpu is already
749 * engaged in dumping all cpu back traces.
750 */
751 if (softlockup_all_cpu_backtrace) {
752 if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
753 return HRTIMER_RESTART;
754 }
755
756 /* Start period for the next softlockup warning. */
757 update_report_ts();
758
759 printk_cpu_sync_get_irqsave(flags);
760 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
761 smp_processor_id(), duration,
762 current->comm, task_pid_nr(current));
763 report_cpu_status();
764 print_modules();
765 print_irqtrace_events(current);
766 if (regs)
767 show_regs(regs);
768 else
769 dump_stack();
770 printk_cpu_sync_put_irqrestore(flags);
771
772 if (softlockup_all_cpu_backtrace) {
773 trigger_allbutcpu_cpu_backtrace(smp_processor_id());
774 if (!softlockup_panic)
775 clear_bit_unlock(0, &soft_lockup_nmi_warn);
776 }
777
778 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
779 if (softlockup_panic)
780 panic("softlockup: hung tasks");
781 }
782
783 return HRTIMER_RESTART;
784}
785
786static void watchdog_enable(unsigned int cpu)
787{
788 struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
789 struct completion *done = this_cpu_ptr(&softlockup_completion);
790
791 WARN_ON_ONCE(cpu != smp_processor_id());
792
793 init_completion(done);
794 complete(done);
795
796 /*
797 * Start the timer first to prevent the hardlockup watchdog triggering
798 * before the timer has a chance to fire.
799 */
800 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
801 hrtimer->function = watchdog_timer_fn;
802 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
803 HRTIMER_MODE_REL_PINNED_HARD);
804
805 /* Initialize timestamp */
806 update_touch_ts();
807 /* Enable the hardlockup detector */
808 if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
809 watchdog_hardlockup_enable(cpu);
810}
811
812static void watchdog_disable(unsigned int cpu)
813{
814 struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
815
816 WARN_ON_ONCE(cpu != smp_processor_id());
817
818 /*
819 * Disable the hardlockup detector first. That prevents that a large
820 * delay between disabling the timer and disabling the hardlockup
821 * detector causes a false positive.
822 */
823 watchdog_hardlockup_disable(cpu);
824 hrtimer_cancel(hrtimer);
825 wait_for_completion(this_cpu_ptr(&softlockup_completion));
826}
827
828static int softlockup_stop_fn(void *data)
829{
830 watchdog_disable(smp_processor_id());
831 return 0;
832}
833
834static void softlockup_stop_all(void)
835{
836 int cpu;
837
838 if (!softlockup_initialized)
839 return;
840
841 for_each_cpu(cpu, &watchdog_allowed_mask)
842 smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
843
844 cpumask_clear(&watchdog_allowed_mask);
845}
846
847static int softlockup_start_fn(void *data)
848{
849 watchdog_enable(smp_processor_id());
850 return 0;
851}
852
853static void softlockup_start_all(void)
854{
855 int cpu;
856
857 cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
858 for_each_cpu(cpu, &watchdog_allowed_mask)
859 smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
860}
861
862int lockup_detector_online_cpu(unsigned int cpu)
863{
864 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
865 watchdog_enable(cpu);
866 return 0;
867}
868
869int lockup_detector_offline_cpu(unsigned int cpu)
870{
871 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
872 watchdog_disable(cpu);
873 return 0;
874}
875
876static void __lockup_detector_reconfigure(void)
877{
878 cpus_read_lock();
879 watchdog_hardlockup_stop();
880
881 softlockup_stop_all();
882 set_sample_period();
883 lockup_detector_update_enable();
884 if (watchdog_enabled && watchdog_thresh)
885 softlockup_start_all();
886
887 watchdog_hardlockup_start();
888 cpus_read_unlock();
889 /*
890 * Must be called outside the cpus locked section to prevent
891 * recursive locking in the perf code.
892 */
893 __lockup_detector_cleanup();
894}
895
896void lockup_detector_reconfigure(void)
897{
898 mutex_lock(&watchdog_mutex);
899 __lockup_detector_reconfigure();
900 mutex_unlock(&watchdog_mutex);
901}
902
903/*
904 * Create the watchdog infrastructure and configure the detector(s).
905 */
906static __init void lockup_detector_setup(void)
907{
908 /*
909 * If sysctl is off and watchdog got disabled on the command line,
910 * nothing to do here.
911 */
912 lockup_detector_update_enable();
913
914 if (!IS_ENABLED(CONFIG_SYSCTL) &&
915 !(watchdog_enabled && watchdog_thresh))
916 return;
917
918 mutex_lock(&watchdog_mutex);
919 __lockup_detector_reconfigure();
920 softlockup_initialized = true;
921 mutex_unlock(&watchdog_mutex);
922}
923
924#else /* CONFIG_SOFTLOCKUP_DETECTOR */
925static void __lockup_detector_reconfigure(void)
926{
927 cpus_read_lock();
928 watchdog_hardlockup_stop();
929 lockup_detector_update_enable();
930 watchdog_hardlockup_start();
931 cpus_read_unlock();
932}
933void lockup_detector_reconfigure(void)
934{
935 __lockup_detector_reconfigure();
936}
937static inline void lockup_detector_setup(void)
938{
939 __lockup_detector_reconfigure();
940}
941#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
942
943static void __lockup_detector_cleanup(void)
944{
945 lockdep_assert_held(&watchdog_mutex);
946 hardlockup_detector_perf_cleanup();
947}
948
949/**
950 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
951 *
952 * Caller must not hold the cpu hotplug rwsem.
953 */
954void lockup_detector_cleanup(void)
955{
956 mutex_lock(&watchdog_mutex);
957 __lockup_detector_cleanup();
958 mutex_unlock(&watchdog_mutex);
959}
960
961/**
962 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
963 *
964 * Special interface for parisc. It prevents lockup detector warnings from
965 * the default pm_poweroff() function which busy loops forever.
966 */
967void lockup_detector_soft_poweroff(void)
968{
969 watchdog_enabled = 0;
970}
971
972#ifdef CONFIG_SYSCTL
973
974/* Propagate any changes to the watchdog infrastructure */
975static void proc_watchdog_update(void)
976{
977 /* Remove impossible cpus to keep sysctl output clean. */
978 cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
979 __lockup_detector_reconfigure();
980}
981
982/*
983 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
984 *
985 * caller | table->data points to | 'which'
986 * -------------------|----------------------------------|-------------------------------
987 * proc_watchdog | watchdog_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED |
988 * | | WATCHDOG_SOFTOCKUP_ENABLED
989 * -------------------|----------------------------------|-------------------------------
990 * proc_nmi_watchdog | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
991 * -------------------|----------------------------------|-------------------------------
992 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
993 */
994static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
995 void *buffer, size_t *lenp, loff_t *ppos)
996{
997 int err, old, *param = table->data;
998
999 mutex_lock(&watchdog_mutex);
1000
1001 old = *param;
1002 if (!write) {
1003 /*
1004 * On read synchronize the userspace interface. This is a
1005 * racy snapshot.
1006 */
1007 *param = (watchdog_enabled & which) != 0;
1008 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1009 *param = old;
1010 } else {
1011 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1012 if (!err && old != READ_ONCE(*param))
1013 proc_watchdog_update();
1014 }
1015 mutex_unlock(&watchdog_mutex);
1016 return err;
1017}
1018
1019/*
1020 * /proc/sys/kernel/watchdog
1021 */
1022static int proc_watchdog(const struct ctl_table *table, int write,
1023 void *buffer, size_t *lenp, loff_t *ppos)
1024{
1025 return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
1026 WATCHDOG_SOFTOCKUP_ENABLED,
1027 table, write, buffer, lenp, ppos);
1028}
1029
1030/*
1031 * /proc/sys/kernel/nmi_watchdog
1032 */
1033static int proc_nmi_watchdog(const struct ctl_table *table, int write,
1034 void *buffer, size_t *lenp, loff_t *ppos)
1035{
1036 if (!watchdog_hardlockup_available && write)
1037 return -ENOTSUPP;
1038 return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
1039 table, write, buffer, lenp, ppos);
1040}
1041
1042#ifdef CONFIG_SOFTLOCKUP_DETECTOR
1043/*
1044 * /proc/sys/kernel/soft_watchdog
1045 */
1046static int proc_soft_watchdog(const struct ctl_table *table, int write,
1047 void *buffer, size_t *lenp, loff_t *ppos)
1048{
1049 return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
1050 table, write, buffer, lenp, ppos);
1051}
1052#endif
1053
1054/*
1055 * /proc/sys/kernel/watchdog_thresh
1056 */
1057static int proc_watchdog_thresh(const struct ctl_table *table, int write,
1058 void *buffer, size_t *lenp, loff_t *ppos)
1059{
1060 int err, old;
1061
1062 mutex_lock(&watchdog_mutex);
1063
1064 old = READ_ONCE(watchdog_thresh);
1065 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1066
1067 if (!err && write && old != READ_ONCE(watchdog_thresh))
1068 proc_watchdog_update();
1069
1070 mutex_unlock(&watchdog_mutex);
1071 return err;
1072}
1073
1074/*
1075 * The cpumask is the mask of possible cpus that the watchdog can run
1076 * on, not the mask of cpus it is actually running on. This allows the
1077 * user to specify a mask that will include cpus that have not yet
1078 * been brought online, if desired.
1079 */
1080static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
1081 void *buffer, size_t *lenp, loff_t *ppos)
1082{
1083 int err;
1084
1085 mutex_lock(&watchdog_mutex);
1086
1087 err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
1088 if (!err && write)
1089 proc_watchdog_update();
1090
1091 mutex_unlock(&watchdog_mutex);
1092 return err;
1093}
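/*
 * Illustrative usage from userspace, assuming the cpulist format accepted
 * by proc_do_large_bitmap(): restrict the watchdog to CPUs 0-3 with
 *
 *   echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 */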
1094
1095static const int sixty = 60;
1096
1097static struct ctl_table watchdog_sysctls[] = {
1098 {
1099 .procname = "watchdog",
1100 .data = &watchdog_user_enabled,
1101 .maxlen = sizeof(int),
1102 .mode = 0644,
1103 .proc_handler = proc_watchdog,
1104 .extra1 = SYSCTL_ZERO,
1105 .extra2 = SYSCTL_ONE,
1106 },
1107 {
1108 .procname = "watchdog_thresh",
1109 .data = &watchdog_thresh,
1110 .maxlen = sizeof(int),
1111 .mode = 0644,
1112 .proc_handler = proc_watchdog_thresh,
1113 .extra1 = SYSCTL_ZERO,
1114 .extra2 = (void *)&sixty,
1115 },
1116 {
1117 .procname = "watchdog_cpumask",
1118 .data = &watchdog_cpumask_bits,
1119 .maxlen = NR_CPUS,
1120 .mode = 0644,
1121 .proc_handler = proc_watchdog_cpumask,
1122 },
1123#ifdef CONFIG_SOFTLOCKUP_DETECTOR
1124 {
1125 .procname = "soft_watchdog",
1126 .data = &watchdog_softlockup_user_enabled,
1127 .maxlen = sizeof(int),
1128 .mode = 0644,
1129 .proc_handler = proc_soft_watchdog,
1130 .extra1 = SYSCTL_ZERO,
1131 .extra2 = SYSCTL_ONE,
1132 },
1133 {
1134 .procname = "softlockup_panic",
1135 .data = &softlockup_panic,
1136 .maxlen = sizeof(int),
1137 .mode = 0644,
1138 .proc_handler = proc_dointvec_minmax,
1139 .extra1 = SYSCTL_ZERO,
1140 .extra2 = SYSCTL_ONE,
1141 },
1142#ifdef CONFIG_SMP
1143 {
1144 .procname = "softlockup_all_cpu_backtrace",
1145 .data = &sysctl_softlockup_all_cpu_backtrace,
1146 .maxlen = sizeof(int),
1147 .mode = 0644,
1148 .proc_handler = proc_dointvec_minmax,
1149 .extra1 = SYSCTL_ZERO,
1150 .extra2 = SYSCTL_ONE,
1151 },
1152#endif /* CONFIG_SMP */
1153#endif
1154#ifdef CONFIG_HARDLOCKUP_DETECTOR
1155 {
1156 .procname = "hardlockup_panic",
1157 .data = &hardlockup_panic,
1158 .maxlen = sizeof(int),
1159 .mode = 0644,
1160 .proc_handler = proc_dointvec_minmax,
1161 .extra1 = SYSCTL_ZERO,
1162 .extra2 = SYSCTL_ONE,
1163 },
1164#ifdef CONFIG_SMP
1165 {
1166 .procname = "hardlockup_all_cpu_backtrace",
1167 .data = &sysctl_hardlockup_all_cpu_backtrace,
1168 .maxlen = sizeof(int),
1169 .mode = 0644,
1170 .proc_handler = proc_dointvec_minmax,
1171 .extra1 = SYSCTL_ZERO,
1172 .extra2 = SYSCTL_ONE,
1173 },
1174#endif /* CONFIG_SMP */
1175#endif
1176};
1177
1178static struct ctl_table watchdog_hardlockup_sysctl[] = {
1179 {
1180 .procname = "nmi_watchdog",
1181 .data = &watchdog_hardlockup_user_enabled,
1182 .maxlen = sizeof(int),
1183 .mode = 0444,
1184 .proc_handler = proc_nmi_watchdog,
1185 .extra1 = SYSCTL_ZERO,
1186 .extra2 = SYSCTL_ONE,
1187 },
1188};
1189
1190static void __init watchdog_sysctl_init(void)
1191{
1192 register_sysctl_init("kernel", watchdog_sysctls);
1193
1194 if (watchdog_hardlockup_available)
1195 watchdog_hardlockup_sysctl[0].mode = 0644;
1196 register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
1197}
1198
1199#else
1200#define watchdog_sysctl_init() do { } while (0)
1201#endif /* CONFIG_SYSCTL */
1202
1203static void __init lockup_detector_delay_init(struct work_struct *work);
1204static bool allow_lockup_detector_init_retry __initdata;
1205
1206static struct work_struct detector_work __initdata =
1207 __WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
1208
1209static void __init lockup_detector_delay_init(struct work_struct *work)
1210{
1211 int ret;
1212
1213 ret = watchdog_hardlockup_probe();
1214 if (ret) {
1215 if (ret == -ENODEV)
1216 pr_info("NMI not fully supported\n");
1217 else
1218 pr_info("Delayed init of the lockup detector failed: %d\n", ret);
1219 pr_info("Hard watchdog permanently disabled\n");
1220 return;
1221 }
1222
1223 allow_lockup_detector_init_retry = false;
1224
1225 watchdog_hardlockup_available = true;
1226 lockup_detector_setup();
1227}
1228
1229/*
1230 * lockup_detector_retry_init - retry initializing the lockup detector if possible.
1231 *
1232 * Retry the hardlockup detector init. This is useful when the detector
1233 * requires functionality that is only initialized later on a particular
1234 * platform.
1235 */
1236void __init lockup_detector_retry_init(void)
1237{
1238 /* Must be called before late init calls */
1239 if (!allow_lockup_detector_init_retry)
1240 return;
1241
1242 schedule_work(&detector_work);
1243}
1244
1245/*
1246 * Ensure that the optional delayed hardlockup init has run before
1247 * the init code and memory are freed.
1248 */
1249static int __init lockup_detector_check(void)
1250{
1251 /* Prevent any later retry. */
1252 allow_lockup_detector_init_retry = false;
1253
1254 /* Make sure no work is pending. */
1255 flush_work(&detector_work);
1256
1257 watchdog_sysctl_init();
1258
1259 return 0;
1260
1261}
1262late_initcall_sync(lockup_detector_check);
1263
1264void __init lockup_detector_init(void)
1265{
1266 if (tick_nohz_full_enabled())
1267 pr_info("Disabling watchdog on nohz_full cores by default\n");
1268
1269 cpumask_copy(&watchdog_cpumask,
1270 housekeeping_cpumask(HK_TYPE_TIMER));
1271
1272 if (!watchdog_hardlockup_probe())
1273 watchdog_hardlockup_available = true;
1274 else
1275 allow_lockup_detector_init_retry = true;
1276
1277 lockup_detector_setup();
1278}