/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
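
/*
 * For example, with both detectors compiled in (the default shown below),
 * watchdog_enabled starts as
 * SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED == 0x3; booting with
 * "nosoftlockup" clears bit 1 and leaves only the hard lockup detector.
 */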

static DEFINE_MUTEX(watchdog_proc_mutex);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
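
/*
 * 'watchdog_cpumask_bits' exposes the raw bitmap of 'watchdog_cpumask';
 * the sysctl table entry behind /proc/sys/kernel/watchdog_cpumask
 * (presumably via its .data field in kernel/sysctl.c) hands this to
 * proc_do_large_bitmap() in proc_watchdog_cpumask() below.
 */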

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator of whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long hardlockup_allcpu_dumped;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
        watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
        else if (!strncmp(str, "1", 1))
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_hardlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
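
/*
 * Summary of the boot parameters handled above; a debugging setup might
 * boot with "nmi_watchdog=panic softlockup_panic=1
 * softlockup_all_cpu_backtrace=1":
 *
 *   nmi_watchdog={0|1|panic|nopanic}  enable/disable the hard lockup
 *                                     detector or set its panic mode
 *   nowatchdog                        disable both detectors
 *   nosoftlockup                      disable only the soft lockup detector
 *   softlockup_panic=<0|1>            panic on soft lockup
 *   softlockup_all_cpu_backtrace=<0|1>  back-trace all CPUs on soft lockup
 *   hardlockup_all_cpu_backtrace=<0|1>  back-trace all CPUs on hard lockup
 */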

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.
         * The divide by 5 gives the hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
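
/*
 * Worked example with the default watchdog_thresh of 10 seconds:
 * get_softlockup_thresh() == 20 s and sample_period == 20 * NSEC_PER_SEC / 5
 * == 4 s, so the hrtimer fires every 4 seconds and a soft lockup is reported
 * once the watchdog thread has been starved for more than 20 seconds.
 */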

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done locklessly. If a 0 races with a real timestamp, all
         * it means is that the softlockup check starts one cycle later.
         */
        for_each_watchdog_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled. If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        raw_cpu_write(watchdog_nmi_touch, true);
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif
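
/*
 * Hedged usage sketch: code that legitimately monopolizes a CPU with
 * interrupts disabled for a long stretch (e.g. writing a crash dump)
 * calls touch_nmi_watchdog() inside its loop so neither detector fires:
 *
 *      while (more_work()) {           // more_work()/do_chunk_of_work()
 *              do_chunk_of_work();     // are hypothetical placeholders,
 *              touch_nmi_watchdog();   // not kernel APIs
 *      }
 */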

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}
#endif

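/*
 * Returns the number of seconds since the watchdog timestamp was last
 * touched when that age exceeds the soft lockup threshold; returns 0
 * otherwise, including when the soft lockup detector is disabled.
 */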
static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
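
/*
 * The hard lockup detector is thus a pinned, initially disabled CPU-cycles
 * counter. watchdog_nmi_enable() fills in .sample_period from
 * watchdog_thresh, so the counter overflows - and the NMI callback below
 * runs - roughly once per hard lockup threshold.
 */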

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing. The timer interrupt should have fired multiple
         * times before we overflowed. If it hasn't, then this is a good
         * indication the cpu is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();
                struct pt_regs *regs = get_irq_regs();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                /*
                 * Perform all-CPU dump only once to avoid multiple hardlockups
                 * generating interleaving traces
                 */
                if (sysctl_hardlockup_all_cpu_backtrace &&
                                !test_and_set_bit(0, &hardlockup_allcpu_dumped))
                        trigger_allbutself_cpu_backtrace();

                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure a high priority task is
         * being scheduled. The task touches the watchdog to indicate it
         * is getting cpu time. If it hasn't, then this is a good
         * indication some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup. Check to see if the host
                 * stopped the vm before we issue the warning.
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true) {
                        /*
                         * When multiple processes are causing softlockups the
                         * softlockup detector only warns on the first one
                         * because the code relies on a full quiet cycle to
                         * re-arm. The second process prevents the quiet cycle
                         * and never gets reported. Use task pointers to detect
                         * this.
                         */
                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
                            current) {
                                __this_cpu_write(soft_watchdog_warn, false);
                                __touch_watchdog();
                        }
                        return HRTIMER_RESTART;
                }

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Prevent multiple soft-lockup reports if one cpu is
                         * already engaged in dumping cpu back traces.
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Avoid generating two back traces for current
                         * given that one is already made above.
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}
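
/*
 * watchdog_should_run() is true whenever the hrtimer has fired since the
 * watchdog thread last ran; the smpboot infrastructure then invokes
 * watchdog() below, which records the interrupt count and re-touches the
 * timestamp.
 */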

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();

        /*
         * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
         * failure path. Check for failures that can occur asynchronously -
         * for example, when CPUs are on-lined - and shut down the hardware
         * perf event on each CPU accordingly.
         *
         * The only non-obvious place this bit can be cleared is through
         * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
         * pr_info here would be too noisy as it would result in a message
         * every few seconds if the hardlockup was disabled but the softlockup
         * enabled.
         */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                watchdog_nmi_disable(cpu);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* nothing to do if the hard lockup detector is disabled */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                goto out;

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0, or if the error differs from cpu0's */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /*
         * Disable the hard lockup detector if _any_ CPU fails to set up
         * the hardware perf event. The watchdog() function checks the
         * NMI_WATCHDOG_ENABLED bit periodically.
         *
         * The barriers are for syncing up watchdog_enabled across all the
         * cpus, as clear_bit() does not use barriers.
         */
        smp_mb__before_atomic();
        clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
        smp_mb__after_atomic();

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warn("disabled (cpu%i): hardware events not enabled\n",
                         cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                        cpu, PTR_ERR(event));

        pr_info("Shutting down hard lockup detector on all cpus\n");

        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        if (cpu == 0) {
                /* watchdog_nmi_enable() expects this to be zero initially. */
                cpu0_err = 0;
        }
}

#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
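
/*
 * Registering this descriptor (see watchdog_enable_all_cpus() below)
 * creates one "watchdog/N" kthread per CPU in 'watchdog_cpumask'. The
 * smpboot core invokes .setup/.unpark and .park/.cleanup as threads are
 * created, parked and torn down, which is what starts and stops the
 * per-cpu hrtimer and perf event.
 */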

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
        int cpu, ret = 0;

        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }

        return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
        int cpu;

        for_each_watchdog_cpu(cpu)
                kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
        int ret = 0;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);
        /*
         * Multiple suspend requests can be active in parallel (counted by
         * the 'watchdog_suspended' variable). If the watchdog threads are
         * running, the first caller takes care that they will be parked.
         * The state of 'watchdog_running' cannot change while a suspend
         * request is active (see related code in 'proc' handlers).
         */
        if (watchdog_running && !watchdog_suspended)
                ret = watchdog_park_threads();

        if (ret == 0)
                watchdog_suspended++;
        else {
                watchdog_disable_all_cpus();
                pr_err("Failed to suspend lockup detectors, disabled\n");
                watchdog_enabled = 0;
        }

        mutex_unlock(&watchdog_proc_mutex);

        return ret;
}
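
/*
 * Hedged usage sketch: a subsystem that knowingly stalls CPUs brackets
 * the critical section so the detectors stay quiet:
 *
 *      if (!lockup_detector_suspend()) {
 *              do_slow_work();         // hypothetical helper
 *              lockup_detector_resume();
 *      }
 *
 * Note that a successful suspend keeps holding the hotplug lock taken by
 * get_online_cpus() until the matching lockup_detector_resume().
 */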

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
        mutex_lock(&watchdog_proc_mutex);

        watchdog_suspended--;
        /*
         * The watchdog threads are unparked if they were previously running
         * and if there is no more active suspend request.
         */
        if (watchdog_running && !watchdog_suspended)
                watchdog_unpark_threads();

        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
}

static int update_watchdog_all_cpus(void)
{
        int ret;

        ret = watchdog_park_threads();
        if (ret)
                return ret;

        watchdog_unpark_threads();

        return 0;
}
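
/*
 * The park/unpark cycle above is how parameter changes propagate: .park
 * runs watchdog_disable() and .unpark runs watchdog_enable() on every
 * watchdog thread, so each CPU restarts its hrtimer and perf event with
 * the current sample_period and watchdog_enabled bits.
 */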

static int watchdog_enable_all_cpus(void)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
                                                             &watchdog_cpumask);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else {
                /*
                 * Enable/disable the lockup detectors or
                 * change the sample period 'on the fly'.
                 */
                err = update_watchdog_all_cpus();

                if (err) {
                        watchdog_disable_all_cpus();
                        pr_err("Failed to update lockup detectors, disabled\n");
                }
        }

        if (err)
                watchdog_enabled = 0;

        return err;
}

static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
        int err = 0;

        /*
         * Watchdog threads won't be started if they are already active.
         * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
         * care of this. If those threads are already active, the sample
         * period will be updated and the lockup detectors will be enabled
         * or disabled 'on the fly'.
         */
        if (watchdog_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

        return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;
        int *watchdog_param = (int *)table->data;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        /*
         * If the parameter is being read return the state of the corresponding
         * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
         * run state of the lockup detectors.
         */
        if (!write) {
                *watchdog_param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (err)
                        goto out;

                /*
                 * There is a race window between fetching the current value
                 * from 'watchdog_enabled' and storing the new value. During
                 * this race window, watchdog_nmi_enable() can sneak in and
                 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
                 * The 'cmpxchg' detects this race and the loop retries.
                 */
                do {
                        old = watchdog_enabled;
                        /*
                         * If the parameter value is not zero set the
                         * corresponding bit(s), else clear it(them).
                         */
                        if (*watchdog_param)
                                new = old | which;
                        else
                                new = old & ~which;
                } while (cmpxchg(&watchdog_enabled, old, new) != old);

                /*
                 * Update the run state of the lockup detectors. There is _no_
                 * need to check the value returned by proc_watchdog_update()
                 * and to restore the previous value of 'watchdog_enabled' as
                 * both lockup detectors are disabled if proc_watchdog_update()
                 * returns an error.
                 */
                if (old == new)
                        goto out;

                err = proc_watchdog_update();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        old = ACCESS_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (err || !write)
                goto out;

        /*
         * Update the sample period. Restore on failure.
         */
        new = ACCESS_ONCE(watchdog_thresh);
        if (old == new)
                goto out;

        set_sample_period();
        err = proc_watchdog_update();
        if (err) {
                watchdog_thresh = old;
                set_sample_period();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write) {
                /* Remove impossible cpus to keep sysctl output cleaner. */
                cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
                            cpu_possible_mask);

                if (watchdog_running) {
                        /*
                         * Failure would be due to being unable to allocate
                         * a temporary cpumask, so we are likely not in a
                         * position to do much else to make things better.
                         */
                        if (smpboot_update_cpumask_percpu_thread(
                                    &watchdog_threads, &watchdog_cpumask) != 0)
                                pr_err("cpumask update failed\n");
                }
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
                cpumask_copy(&watchdog_cpumask, housekeeping_mask);
        } else
                cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
}