v5.4
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
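
/*
 * Editorial illustration (not part of the kernel source): with HZ=1000
 * and CONFIG_PROVE_RCU=n, a boot-time rcu_cpu_stall_timeout of 1 second
 * is clamped to the Kconfig minimum of 3 seconds, so a caller would see
 * 3 * 1000 + 0 = 3000 jiffies:
 *
 *	int timeout = rcu_jiffies_till_stall_check();	// 3000 with HZ=1000
 */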
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
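
/*
 * Editorial note: sysctl_panic_on_rcu_stall is exposed (elsewhere, in
 * kernel/sysctl.c) as kernel.panic_on_rcu_stall, so writing 1 to
 * /proc/sys/kernel/panic_on_rcu_stall makes the next stall warning panic
 * the system, which is handy for capturing a crash dump at the moment of
 * the stall.
 */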

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
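
/*
 * Illustrative caller sketch (hypothetical, not from this file): because
 * hard irqs must be disabled across the call, a user might write:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	rcu_cpu_stall_reset();
 *	local_irq_restore(flags);
 */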

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rcu_state.gp_start = j;
	j1 = rcu_jiffies_till_stall_check();
	/* Record ->gp_start before ->jiffies_stall. */
	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
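
/*
 * Editorial note: the smp_store_release() above is the write side of the
 * ordering that check_cpu_stall() below depends on when it reads
 * ->jiffies_stall and ->gp_start between smp_rmb() calls, which is how
 * stale ->gp_start/->jiffies_stall combinations are rejected.
 */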

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}
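
/*
 * Editorial sketch (assumption, not from this file): the posting side
 * lives in the forced-quiescent-state code and conceptually does:
 *
 *	rdp->rcu_iw_pending = true;
 *	irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
 *
 * If the stalled CPU ever re-enables irqs, the handler above runs and
 * records the grace-period number, so a large gap between ->rcu_iw_gp_seq
 * and the current ->gp_seq implicates long irq-disabled regions.
 */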

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPTION

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPTION */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPTION */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		".l"[rdp->all_lazy],
		".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
		".D"[!!rdp->tick_nohz_enabled_snap]);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

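/*
 * Editorial illustration: the ".l"[flag] construct above indexes a
 * two-character string literal, yielding '.' for a zero flag and the
 * letter for a nonzero one:
 *
 *	char c0 = ".l"[0];	// c0 == '.'
 *	char c1 = ".l"[1];	// c1 == 'l'
 *
 * So an output of ".L." means that only the middle condition held.
 */
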
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
				"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz);
}

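/*
 * Hypothetical decoded example (all values invented): a line such as
 *
 *	3-...!: (0 ticks this GP) idle=4c2/1/0x4000000000000002 softirq=1250/1250 fqs=0
 *
 * would describe CPU 3: online ('.'), present in both ->qsmaskinit
 * fields ('.', '.'), with its stall-time irq_work last completed in an
 * earlier grace period ('!'), no scheduler ticks taken during a grace
 * period it is aware of, and no RCU softirqs since the grace period
 * began.
 */
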
/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	j = jiffies - READ_ONCE(rcu_state.gp_activity);
	if (j > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       READ_ONCE(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

static void print_other_cpu_stall(unsigned long gp_seq)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       READ_ONCE(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - rcu_state.gp_start,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall();
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);
	}
}
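
/*
 * Editorial note: the cmpxchg() on ->jiffies_stall in both branches above
 * doubles as a reporting lock: of all CPUs racing to report the same
 * stall, only the one whose cmpxchg() succeeds pushes the timeout out to
 * "jn" and prints, so each stall is reported once rather than once per
 * CPU.
 */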

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	j = jiffies;
	ja = j - READ_ONCE(rcu_state.gp_activity);
	jr = j - READ_ONCE(rcu_state.gp_req_activity);
	jw = j - READ_ONCE(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state,
		rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
		(long)READ_ONCE(rcu_state.gp_seq),
		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
		READ_ONCE(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
			(long)rnp->gp_seq_needed);
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (rdp->gpwrap ||
			    ULONG_CMP_GE(rcu_state.gp_seq,
					 rdp->gp_seq_needed))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)rdp->gp_seq_needed);
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	/* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
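
/*
 * Editorial usage note (assumption: this file is built as part of
 * kernel/rcu/tree.c, whose module parameters carry the "rcutree."
 * prefix): booting with rcutree.sysrq_rcu=1 registers the handler, after
 * which "echo y > /proc/sysrq-trigger" dumps the tree via
 * show_rcu_gp_kthreads().
 */
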
v5.9
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

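/*
 * Illustrative caller sketch (hypothetical, following the kerneldoc
 * above): prefer a synchronous grace-period wait over allocating memory
 * for asynchronous freeing when the grace period appears stalled.
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();		// GP stalled: wait, then free.
 *		kfree(p);
 *	} else {
 *		call_rcu(&p->rh, free_cb);	// "rh" and "free_cb" hypothetical.
 *	}
 */
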
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

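/*
 * Hypothetical decoded example (values invented): an entry such as
 * " P42/1:b..l" would mean PID 42 was inspected while not running, with
 * rcu_read_lock_nesting of 1, blocked within its current RCU read-side
 * critical section ('b'), not needing a quiescent state ('.'), with no
 * expedited-grace-period hint ('.'), and still queued on a ->blkd_tasks
 * list ('l').
 */
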
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
				"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - gps),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_stall_is_suppressed() && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
			(long)data_race(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);