kernel/watchdog.c (v4.10.11)
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_proc_mutex);

#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We
 * therefore couple the two thresholds with a factor: the soft threshold is
 * twice the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
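/*
 * Worked example: with the default watchdog_thresh of 10, the soft lockup
 * threshold is 20 s, so sample_period = 20 * (NSEC_PER_SEC / 5) = 4e9 ns,
 * i.e. the per-cpu hrtimer fires every 4 seconds.
 */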

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}
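/*
 * Usage sketch (not part of this file): kernel code that legitimately
 * monopolizes a CPU for a long stretch can hold off the soft lockup
 * detector by touching the watchdog from its loop.  more_work() and
 * do_chunk() are hypothetical stand-ins:
 *
 *	while (more_work()) {
 *		do_chunk();
 *		touch_softlockup_watchdog();
 *	}
 */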

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

/*
 * These two functions are mostly architecture specific, so we define
 * them as weak here.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}
void __weak watchdog_nmi_disable(unsigned int cpu)
{
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: this is done by making sure a high
	 * priority task is being scheduled.  The task touches the watchdog
	 * to indicate it is getting cpu time.  If it hasn't, then this is a
	 * good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup; check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
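/*
 * Usage sketch (not part of this file): suspend/resume requests nest,
 * since 'watchdog_suspended' is a counter, and a failed suspend must not
 * be paired with a resume.  A hypothetical caller would look like:
 *
 *	if (!lockup_detector_suspend()) {
 *		do_watchdog_sensitive_work();
 *		lockup_detector_resume();
 *	}
 */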

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}

/*
 * common function for the watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
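/*
 * Userspace sketch (not part of this file): the three handlers above back
 * /proc/sys/kernel/watchdog, nmi_watchdog and soft_watchdog.  A minimal
 * program that turns the soft lockup detector off:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/soft_watchdog", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("0\n", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */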

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}
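For contrast, a much older revision of the same file follows. In v4.10.11 the perf-based hardlockup detector lives outside this file and consumes is_hardlockup() above, watchdog threads are managed through the smpboot per-cpu thread API, and each /proc/sys/kernel knob has its own handler; in v3.5.6 below, the perf event code is still inline, the threads are raw kthreads driven by a CPU hotplug notifier, and a single proc_dowatchdog() serves the sysctls. A minimal sketch of the NMI-side consumer of is_hardlockup(), modeled on the v3.5.6 callback below (names shortened, not a verbatim copy):

	static void overflow_cb(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
	{
		/* same hrtimer_interrupts counter the timer function kicks */
		if (is_hardlockup())
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     smp_processor_id());
	}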
kernel/watchdog.c (v3.5.6)
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We
 * therefore couple the two thresholds with a factor: the soft threshold is
 * twice the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static unsigned long get_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup: this is done by making sure our timer
	 * interrupt is incrementing.  The timer interrupt should have fired
	 * multiple times before we overflowed.  If it hasn't, then this is a
	 * good indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
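/*
 * Worked example of the scheme above: watchdog_nmi_enable() below programs
 * the perf event with roughly watchdog_thresh seconds' worth of CPU cycles
 * (10 s by default), while the hrtimer increments hrtimer_interrupts every
 * 4 s.  A healthy CPU therefore shows a fresh count at each NMI; if two
 * consecutive NMIs observe the same count, timer interrupts have been
 * blocked for about 10 s and is_hardlockup() returns 1.
 */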

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: this is done by making sure a high
	 * priority task is being scheduled.  The task touches the watchdog
	 * to indicate it is getting cpu time.  If it hasn't, then this is a
	 * good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup; check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = 0 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly (kicked by the hrtimer callback function) once every
	 * get_sample_period() seconds (4 seconds by default) to reset the
	 * softlockup timestamp. If this gets delayed for more than
	 * 2*watchdog_thresh seconds then the debug-printout triggers in
	 * watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	/*
	 * Drop the policy/priority elevation during thread exit to avoid a
	 * scheduling latency spike.
	 */
	__set_current_state(TASK_RUNNING);
	sched_setscheduler(current, SCHED_NORMAL, &param);
	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static void watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
}

static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err = 0;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);

	/* Regardless of err above, fall through and start softlockup */

	/* create the watchdog thread */
	if (!p) {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
		p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			pr_err("softlockup watchdog for %i failed\n", cpu);
			if (!err) {
				/* if hardlockup hasn't already set this */
				err = PTR_ERR(p);
				/* and disable the perf event */
				watchdog_nmi_disable(cpu);
			}
			goto out;
		}
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

out:
	return err;
}

static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	int cpu;

	watchdog_enabled = 0;

	for_each_online_cpu(cpu)
		if (!watchdog_enable(cpu))
			/* if any cpu succeeds, watchdog is considered
			   enabled for the system */
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		pr_err("failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto out;

	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

out:
	return ret;
}
#endif /* CONFIG_SYSCTL */

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (watchdog_enabled)
			watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}

	/*
	 * hardlockup and softlockup are not important enough
	 * to block cpu bring up.  Just always succeed and
	 * rely on printk output to flag problems.
	 */
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	return;
}