// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
# define WATCHDOG_HARDLOCKUP_DEFAULT    1
#else
# define WATCHDOG_HARDLOCKUP_DEFAULT    0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_hardlockup_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
                        IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
        watchdog_hardlockup_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_hardlockup_user_enabled = 0;
        else if (!strncmp(str, "1", 1))
                watchdog_hardlockup_user_enabled = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)

static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long hard_lockup_nmi_warn;

notrace void arch_touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled.  If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        raw_cpu_write(watchdog_hardlockup_touched, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
        per_cpu(watchdog_hardlockup_touched, cpu) = true;
}

static bool is_hardlockup(unsigned int cpu)
{
        int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));

        if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
                return true;

        /*
         * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
         * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
         * written/read by a single CPU.
         */
        per_cpu(hrtimer_interrupts_saved, cpu) = hrint;

        return false;
}
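/*
 * Illustrative sketch of the counting scheme: the hardlockup check runs
 * once per check window, while the softlockup hrtimer should fire two or
 * three times within that window (see set_sample_period() below). If
 * hrtimer_interrupts has not advanced between two consecutive checks, the
 * CPU has taken no timer interrupt for the whole window, which is treated
 * as a hard lockup.
 */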

static void watchdog_hardlockup_kick(void)
{
        int new_interrupts;

        new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
        watchdog_buddy_check_hardlockup(new_interrupts);
}
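/*
 * Called from watchdog_timer_fn() on every hrtimer tick. Besides feeding
 * the counter that is_hardlockup() reads, it hands the new count to the
 * buddy detector (when that detector is configured) so it can run its
 * own check.
 */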

void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
        if (per_cpu(watchdog_hardlockup_touched, cpu)) {
                per_cpu(watchdog_hardlockup_touched, cpu) = false;
                return;
        }

        /*
         * Check for a hardlockup by making sure the CPU's timer
         * interrupt is incrementing. The timer interrupt should have
         * fired multiple times since we last checked. If it hasn't,
         * that is a good indication the CPU is stuck.
         */
        if (is_hardlockup(cpu)) {
                unsigned int this_cpu = smp_processor_id();
                unsigned long flags;

                /* Only print hardlockups once. */
                if (per_cpu(watchdog_hardlockup_warned, cpu))
                        return;

                /*
                 * Prevent multiple hard-lockup reports if one cpu is already
                 * engaged in dumping all cpu back traces.
                 */
                if (sysctl_hardlockup_all_cpu_backtrace) {
                        if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
                                return;
                }

                /*
                 * NOTE: we call printk_cpu_sync_get_irqsave() after printing
                 * the lockup message. While it would be nice to serialize
                 * that printout, we really want to make sure that if some
                 * other CPU somehow locked up while holding the lock associated
                 * with printk_cpu_sync_get_irqsave() that we can still at least
                 * get the message about the lockup out.
                 */
                pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
                printk_cpu_sync_get_irqsave(flags);

                print_modules();
                print_irqtrace_events(current);
                if (cpu == this_cpu) {
                        if (regs)
                                show_regs(regs);
                        else
                                dump_stack();
                        printk_cpu_sync_put_irqrestore(flags);
                } else {
                        printk_cpu_sync_put_irqrestore(flags);
                        trigger_single_cpu_backtrace(cpu);
                }

                if (sysctl_hardlockup_all_cpu_backtrace) {
                        trigger_allbutcpu_cpu_backtrace(cpu);
                        if (!hardlockup_panic)
                                clear_bit_unlock(0, &hard_lockup_nmi_warn);
                }

                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");

                per_cpu(watchdog_hardlockup_warned, cpu) = true;
        } else {
                per_cpu(watchdog_hardlockup_warned, cpu) = false;
        }
}

#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

static inline void watchdog_hardlockup_kick(void) { }

#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

/*
 * These functions can be overridden based on the configured hardlockup
 * detector.
 *
 * watchdog_hardlockup_enable/disable can be implemented to start and stop
 * when the softlockup watchdog starts and stops. The detector must select
 * the SOFTLOCKUP_DETECTOR Kconfig.
 */
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }

void __weak watchdog_hardlockup_disable(unsigned int cpu) { }

/*
 * Watchdog-detector specific API.
 *
 * Return 0 when hardlockup watchdog is available, negative value otherwise.
 * Note that the negative value means that a delayed probe might
 * succeed later.
 */
int __weak __init watchdog_hardlockup_probe(void)
{
        return -ENODEV;
}
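/*
 * A concrete detector (for example a perf- or buddy-based one, where
 * configured; naming is illustrative here) is expected to override this
 * weak probe. The -ENODEV stub just means no hardlockup detector was
 * built in.
 */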

/**
 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_hardlockup_stop();
 * update_variables();
 * watchdog_hardlockup_start();
 */
void __weak watchdog_hardlockup_stop(void) { }

/**
 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_hardlockup_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_hardlockup_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the hard watchdogs are off, so this
 * can't race with watchdog_hardlockup_disable().
 */
static void lockup_detector_update_enable(void)
{
        watchdog_enabled = 0;
        if (!watchdog_user_enabled)
                return;
        if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
                watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
        if (watchdog_softlockup_user_enabled)
                watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
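/*
 * Example: booting with "nowatchdog" clears watchdog_user_enabled, so
 * watchdog_enabled stays 0 no matter what the per-detector knobs say.
 * Booting with "nmi_watchdog=0" only keeps WATCHDOG_HARDLOCKUP_ENABLED
 * from being set, leaving the softlockup bit alone.
 */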

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT         ULONG_MAX
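/*
 * A per-CPU watchdog_report_ts equal to this sentinel tells
 * watchdog_timer_fn() that the period was touched: it restarts the
 * reporting window instead of comparing timestamps (see the
 * "Reset the interval" branch there).
 */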

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
                        IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        watchdog_softlockup_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
        get_option(&str, &watchdog_thresh);
        return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
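/*
 * With the default watchdog_thresh of 10 this yields a 20 second
 * softlockup threshold: a CPU must fail to reschedule for at least
 * roughly that long before is_softlockup() reports it.
 */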

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.  The divide by 5
         * gives the hrtimer several chances (two or three with the
         * current relation between the soft and hard thresholds) to
         * increment before the hardlockup detector generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
        watchdog_update_hrtimer_threshold(sample_period);
}
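/*
 * Worked example with the defaults: watchdog_thresh = 10 gives a soft
 * threshold of 20s, so sample_period = 20 * (1e9 / 5) ns = 4e9 ns,
 * i.e. the per-CPU hrtimer fires every 4 seconds (the "4 seconds by
 * default" mentioned at softlockup_fn() below).
 */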

static void update_report_ts(void)
{
        __this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
        update_report_ts();
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled.  It doesn't matter which CPU's watchdog
         * report period gets restarted here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

notrace void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * watchdog_mutex cannot be taken here, as this might be called
         * from (soft)interrupt context, so the access to
         * watchdog_allowed_mask might race with a concurrent update.
         *
         * The watchdog time stamp can race against a concurrent real
         * update as well; the only side effect might be a cycle delay for
         * the softlockup check.
         */
        for_each_cpu(cpu, &watchdog_allowed_mask) {
                per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
                wq_watchdog_touch(cpu);
        }
}

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
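/*
 * The _sync variant additionally sets softlockup_touch_sync, which makes
 * watchdog_timer_fn() call sched_clock_tick() before trusting the clock
 * again (see the softlockup_touch_sync branch there).
 */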

static int is_softlockup(unsigned long touch_ts,
                         unsigned long period_ts,
                         unsigned long now)
{
        if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, period_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}

/* watchdog detector functions */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp.  If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
        update_touch_ts();
        complete(this_cpu_ptr(&softlockup_completion));

        return 0;
}
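/*
 * Detection timeline sketch (defaults): every 4s watchdog_timer_fn()
 * (below) queues softlockup_fn() on the stopper task. If a CPU loops in
 * kernel mode without letting the stopper run (e.g. with preemption
 * disabled), watchdog_report_ts goes stale; once it is more than 20s old,
 * the next hrtimer tick reports a soft lockup.
 */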

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts, period_ts, now;
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
        unsigned long flags;

        if (!watchdog_enabled)
                return HRTIMER_NORESTART;

        watchdog_hardlockup_kick();

        /* kick the softlockup detector */
        if (completion_done(this_cpu_ptr(&softlockup_completion))) {
                reinit_completion(this_cpu_ptr(&softlockup_completion));
                stop_one_cpu_nowait(smp_processor_id(),
                                    softlockup_fn, NULL,
                                    this_cpu_ptr(&softlockup_stop_work));
        }

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        /*
         * Read the current timestamp first. It can become invalid at any
         * time, e.g. when a virtual machine is stopped by the host or when
         * the watchdog is touched from NMI.
         */
        now = get_timestamp();
        /*
         * If a virtual machine is stopped by the host it can look to
         * the watchdog like a soft lockup. This function touches the watchdog.
         */
        kvm_check_and_clear_guest_paused();
        /*
         * The stored timestamp is comparable with @now only when not touched.
         * It might get touched anytime from NMI. Make sure that is_softlockup()
         * uses the same (valid) value.
         */
        period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

        /* Reset the interval when touched by known problematic code. */
        if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                update_report_ts();
                return HRTIMER_RESTART;
        }

        /* Check for a softlockup. */
        touch_ts = __this_cpu_read(watchdog_touch_ts);
        duration = is_softlockup(touch_ts, period_ts, now);
        if (unlikely(duration)) {
                /*
                 * Prevent multiple soft-lockup reports if one cpu is already
                 * engaged in dumping all cpu back traces.
                 */
                if (softlockup_all_cpu_backtrace) {
                        if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
                                return HRTIMER_RESTART;
                }

                /* Start period for the next softlockup warning. */
                update_report_ts();

                printk_cpu_sync_get_irqsave(flags);
                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                         smp_processor_id(), duration,
                         current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();
                printk_cpu_sync_put_irqrestore(flags);

                if (softlockup_all_cpu_backtrace) {
                        trigger_allbutcpu_cpu_backtrace(smp_processor_id());
                        if (!softlockup_panic)
                                clear_bit_unlock(0, &soft_lockup_nmi_warn);
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
        }

        return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
        struct completion *done = this_cpu_ptr(&softlockup_completion);

        WARN_ON_ONCE(cpu != smp_processor_id());

        init_completion(done);
        complete(done);

        /*
         * Start the timer first to prevent the hardlockup watchdog triggering
         * before the timer has a chance to fire.
         */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        hrtimer->function = watchdog_timer_fn;
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED_HARD);

        /* Initialize timestamp */
        update_touch_ts();
        /* Enable the hardlockup detector */
        if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
                watchdog_hardlockup_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

        WARN_ON_ONCE(cpu != smp_processor_id());

        /*
         * Disable the hardlockup detector first. That prevents a false
         * positive from a large delay between disabling the timer and
         * disabling the hardlockup detector.
         */
        watchdog_hardlockup_disable(cpu);
        hrtimer_cancel(hrtimer);
        wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
        watchdog_disable(smp_processor_id());
        return 0;
}

static void softlockup_stop_all(void)
{
        int cpu;

        if (!softlockup_initialized)
                return;

        for_each_cpu(cpu, &watchdog_allowed_mask)
                smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

        cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
        watchdog_enable(smp_processor_id());
        return 0;
}

static void softlockup_start_all(void)
{
        int cpu;

        cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
        for_each_cpu(cpu, &watchdog_allowed_mask)
                smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
                watchdog_enable(cpu);
        return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
                watchdog_disable(cpu);
        return 0;
}

static void __lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_hardlockup_stop();

        softlockup_stop_all();
        set_sample_period();
        lockup_detector_update_enable();
        if (watchdog_enabled && watchdog_thresh)
                softlockup_start_all();

        watchdog_hardlockup_start();
        cpus_read_unlock();
        /*
         * Must be called outside the cpus locked section to prevent
         * recursive locking in the perf code.
         */
        __lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
        mutex_lock(&watchdog_mutex);
        __lockup_detector_reconfigure();
        mutex_unlock(&watchdog_mutex);
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
        /*
         * If sysctl is off and watchdog got disabled on the command line,
         * nothing to do here.
         */
        lockup_detector_update_enable();

        if (!IS_ENABLED(CONFIG_SYSCTL) &&
            !(watchdog_enabled && watchdog_thresh))
                return;

        mutex_lock(&watchdog_mutex);
        __lockup_detector_reconfigure();
        softlockup_initialized = true;
        mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
        cpus_read_lock();
        watchdog_hardlockup_stop();
        lockup_detector_update_enable();
        watchdog_hardlockup_start();
        cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
        __lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
        __lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
        lockdep_assert_held(&watchdog_mutex);
        hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
        mutex_lock(&watchdog_mutex);
        __lockup_detector_cleanup();
        mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
        watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
        /* Remove impossible cpus to keep sysctl output clean. */
        cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
        __lockup_detector_reconfigure();
}

/*
 * common function for the watchdog, nmi_watchdog and soft_watchdog
 * parameters
 *
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, *param = table->data;

        mutex_lock(&watchdog_mutex);

        if (!write) {
                /*
                 * On read synchronize the userspace interface. This is a
                 * racy snapshot.
                 */
                *param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                old = READ_ONCE(*param);
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (!err && old != READ_ONCE(*param))
                        proc_watchdog_update();
        }
        mutex_unlock(&watchdog_mutex);
        return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
static int proc_watchdog(struct ctl_table *table, int write,
                         void *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
                                    WATCHDOG_SOFTOCKUP_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
static int proc_nmi_watchdog(struct ctl_table *table, int write,
                             void *buffer, size_t *lenp, loff_t *ppos)
{
        if (!watchdog_hardlockup_available && write)
                return -ENOTSUPP;
        return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
/*
 * /proc/sys/kernel/soft_watchdog
 */
static int proc_soft_watchdog(struct ctl_table *table, int write,
                              void *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
#endif

/*
 * /proc/sys/kernel/watchdog_thresh
 */
static int proc_watchdog_thresh(struct ctl_table *table, int write,
                                void *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old;

        mutex_lock(&watchdog_mutex);

        old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (!err && write && old != READ_ONCE(watchdog_thresh))
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
static int proc_watchdog_cpumask(struct ctl_table *table, int write,
                                 void *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        mutex_lock(&watchdog_mutex);

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write)
                proc_watchdog_update();

        mutex_unlock(&watchdog_mutex);
        return err;
}

static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {
        {
                .procname       = "watchdog",
                .data           = &watchdog_user_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_watchdog,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "watchdog_thresh",
                .data           = &watchdog_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_watchdog_thresh,
                .extra1         = SYSCTL_ZERO,
                .extra2         = (void *)&sixty,
        },
        {
                .procname       = "watchdog_cpumask",
                .data           = &watchdog_cpumask_bits,
                .maxlen         = NR_CPUS,
                .mode           = 0644,
                .proc_handler   = proc_watchdog_cpumask,
        },
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
        {
                .procname       = "soft_watchdog",
                .data           = &watchdog_softlockup_user_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_soft_watchdog,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "softlockup_panic",
                .data           = &softlockup_panic,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#ifdef CONFIG_SMP
        {
                .procname       = "softlockup_all_cpu_backtrace",
                .data           = &sysctl_softlockup_all_cpu_backtrace,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
        {
                .procname       = "hardlockup_panic",
                .data           = &hardlockup_panic,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#ifdef CONFIG_SMP
        {
                .procname       = "hardlockup_all_cpu_backtrace",
                .data           = &sysctl_hardlockup_all_cpu_backtrace,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
#endif /* CONFIG_SMP */
#endif
        {}
};

static struct ctl_table watchdog_hardlockup_sysctl[] = {
        {
                .procname       = "nmi_watchdog",
                .data           = &watchdog_hardlockup_user_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_nmi_watchdog,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {}
};

static void __init watchdog_sysctl_init(void)
{
        register_sysctl_init("kernel", watchdog_sysctls);

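        /*
         * nmi_watchdog is registered read-only (0444) and only made
         * writable once a hardlockup detector has actually probed;
         * proc_nmi_watchdog() rejects writes with -ENOTSUPP until then
         * anyway.
         */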
        if (watchdog_hardlockup_available)
                watchdog_hardlockup_sysctl[0].mode = 0644;
        register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
}

#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;

static struct work_struct detector_work __initdata =
                __WORK_INITIALIZER(detector_work, lockup_detector_delay_init);

static void __init lockup_detector_delay_init(struct work_struct *work)
{
        int ret;

        ret = watchdog_hardlockup_probe();
        if (ret) {
                pr_info("Delayed init of the lockup detector failed: %d\n", ret);
                pr_info("Hard watchdog permanently disabled\n");
                return;
        }

        allow_lockup_detector_init_retry = false;

        watchdog_hardlockup_available = true;
        lockup_detector_setup();
}

/*
 * lockup_detector_retry_init - retry lockup detector init if possible.
 *
 * Retry the hardlockup detector init. Useful when the detector depends on
 * some functionality that has to be initialized later on a particular
 * platform.
 */
void __init lockup_detector_retry_init(void)
{
        /* Must be called before late init calls */
        if (!allow_lockup_detector_init_retry)
                return;

        schedule_work(&detector_work);
}
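/*
 * Typical use (illustrative): an architecture whose hardlockup detector
 * needs a resource that is not ready at lockup_detector_init() time
 * (e.g. a perf PMU driver that probes later) calls
 * lockup_detector_retry_init() once that resource is up, re-running the
 * probe via detector_work before the late initcalls finalize things.
 */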

/*
 * Ensure that the optional delayed hardlockup init was performed before
 * the init code and memory are freed.
 */
static int __init lockup_detector_check(void)
{
        /* Prevent any later retry. */
        allow_lockup_detector_init_retry = false;

        /* Make sure no work is pending. */
        flush_work(&detector_work);

        watchdog_sysctl_init();

        return 0;
}
late_initcall_sync(lockup_detector_check);

void __init lockup_detector_init(void)
{
        if (tick_nohz_full_enabled())
                pr_info("Disabling watchdog on nohz_full cores by default\n");

        cpumask_copy(&watchdog_cpumask,
                     housekeeping_cpumask(HK_TYPE_TIMER));

        if (!watchdog_hardlockup_probe())
                watchdog_hardlockup_available = true;
        else
                allow_lockup_detector_init_retry = true;

        lockup_detector_setup();
}