v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
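
/*
 * E.g. booting with "nmi_watchdog=nopanic" keeps the hard-lockup detector
 * enabled but downgrades a detected lockup from a panic to a warning,
 * while "nmi_watchdog=0" disables the detector entirely.
 */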

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * the softlockup watchdog starts and stops. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
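
/*
 * With the defaults on a CONFIG_HARDLOCKUP_DETECTOR kernel whose NMI probe
 * succeeded, this leaves watchdog_enabled ==
 * (NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED), i.e. WATCHDOG_DEFAULT.
 */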

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
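
/*
 * E.g. "watchdog_thresh=30" on the command line sets the hard-lockup
 * window to 30 s and, via get_softlockup_thresh() below, the soft-lockup
 * window to 60 s.
 */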

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
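
/*
 * E.g. a running_clock() value of 10,000,000,000 ns (10 s) yields
 * 10e9 >> 30 == 9, so these "seconds" run about 7% slow - close enough
 * for lockup detection.
 */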

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
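
/*
 * Worked example with the default watchdog_thresh of 10: the soft
 * threshold is 20 s, so sample_period = 20 * (1e9 / 5) ns = 4e9 ns,
 * i.e. the per-CPU hrtimer fires every 4 seconds.
 */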

static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{
	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
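
/*
 * Example with the 4 s sample period: a reschedule at t = 0 records
 * touch_ts = period_ts = 0.  If the CPU then spins in kernel mode with
 * interrupts enabled, the hrtimer keeps firing; at now = 24 the check
 * time_after(24, 0 + 20) succeeds and the CPU is reported stuck for 24 s.
 */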

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
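
/*
 * Called from the perf NMI path (the perf-based detector in
 * kernel/watchdog_hld.c): if the per-CPU hrtimer interrupt count has not
 * advanced since the previous NMI, timer interrupts have been blocked for
 * the whole hard-lockup window, which is a good indication the CPU is
 * stuck.
 */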

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/*
	 * Read the current timestamp first. It might become invalid anytime
	 * when a virtual machine is stopped by the host or when the watchdog
	 * is touched from NMI.
	 */
	now = get_timestamp();
	/*
	 * If a virtual machine is stopped by the host it can look to
	 * the watchdog like a soft lockup. This function touches the watchdog.
	 */
	kvm_check_and_clear_guest_paused();
	/*
	 * The stored timestamp is comparable with @now only when not touched.
	 * It might get touched anytime from NMI. Make sure that is_softlockup()
	 * uses the same (valid) value.
	 */
	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

	/* Reset the interval when touched by known problematic code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		update_report_ts();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup. */
	touch_ts = __this_cpu_read(watchdog_touch_ts);
	duration = is_softlockup(touch_ts, period_ts, now);
	if (unlikely(duration)) {
		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_report_ts();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutself_cpu_backtrace();
			clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first. That prevents a large delay
	 * between disabling the timer and disabling the perf event from
	 * causing the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	__lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
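
/*
 * E.g. "echo 0 > /proc/sys/kernel/nmi_watchdog" lands here via
 * proc_nmi_watchdog() with which == NMI_WATCHDOG_ENABLED; the changed
 * value then triggers proc_watchdog_update() to reconfigure the detectors.
 */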

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {
	{
		.procname       = "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler   = proc_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_thresh",
		.data		= &watchdog_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog_thresh,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&sixty,
	},
	{
		.procname       = "nmi_watchdog",
		.data		= &nmi_watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= NMI_WATCHDOG_SYSCTL_PERM,
		.proc_handler   = proc_nmi_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_cpumask",
		.data		= &watchdog_cpumask_bits,
		.maxlen		= NR_CPUS,
		.mode		= 0644,
		.proc_handler	= proc_watchdog_cpumask,
	},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
	{
		.procname       = "soft_watchdog",
		.data		= &soft_watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler   = proc_soft_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "softlockup_panic",
		.data		= &softlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "softlockup_all_cpu_backtrace",
		.data		= &sysctl_softlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
	{
		.procname	= "hardlockup_panic",
		.data		= &hardlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "hardlockup_all_cpu_backtrace",
		.data		= &sysctl_hardlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
	{}
};

static void __init watchdog_sysctl_init(void)
{
	register_sysctl_init("kernel", watchdog_sysctls);
}
#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_TYPE_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
	watchdog_sysctl_init();
}
v3.5.6
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static unsigned long get_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}
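
/*
 * Same 4 s period as the v6.2 set_sample_period() above with the default
 * threshold; note the newer code casts NSEC_PER_SEC to u64, which keeps
 * the multiplication from overflowing a 32-bit long for larger thresholds.
 */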

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}


/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = 0 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly (kicked by the hrtimer callback function) once every
	 * get_sample_period() seconds (4 seconds by default) to reset the
	 * softlockup timestamp. If this gets delayed for more than
	 * 2*watchdog_thresh seconds then the debug-printout triggers in
	 * watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	/*
	 * Drop the policy/priority elevation during thread exit to avoid a
	 * scheduling latency spike.
	 */
	__set_current_state(TASK_RUNNING);
	sched_setscheduler(current, SCHED_NORMAL, &param);
	return 0;
}
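
/*
 * For comparison: the v6.2 listing above drops this per-CPU SCHED_FIFO
 * kthread and instead kicks a one-shot cpu_stop callback (softlockup_fn)
 * from the hrtimer via stop_one_cpu_nowait() to touch the timestamp.
 */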

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
}

static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err = 0;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);

	/* Regardless of err above, fall through and start softlockup */

	/* create the watchdog thread */
	if (!p) {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			pr_err("softlockup watchdog for %i failed\n", cpu);
			if (!err) {
				/* if hardlockup hasn't already set this */
				err = PTR_ERR(p);
				/* and disable the perf event */
				watchdog_nmi_disable(cpu);
			}
			goto out;
		}
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

out:
	return err;
}

static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	int cpu;

	watchdog_enabled = 0;

	for_each_online_cpu(cpu)
		if (!watchdog_enable(cpu))
			/* if any cpu succeeds, watchdog is considered
			   enabled for the system */
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		pr_err("failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}


/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto out;

	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

out:
	return ret;
}
#endif /* CONFIG_SYSCTL */


/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (watchdog_enabled)
			watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}

	/*
	 * hardlockup and softlockup are not important enough
	 * to block cpu bring up.  Just always succeed and
	 * rely on printk output to flag problems.
	 */
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	return;
}