v5.9
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
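
/*
 * Worked example: with HZ=1000 and the default rcu_cpu_stall_timeout of
 * 21 seconds, the function above returns 21000 jiffies, or 26000 with
 * CONFIG_PROVE_RCU=y, which adds the 5*HZ RCU_STALL_DELAY_DELTA slack.
 */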

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
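
/*
 * Illustrative (hypothetical) use, matching the kernel-doc above: fall
 * back to a direct wait when the grace period looks stalled, rather than
 * allocating memory to queue the object for RCU-mediated freeing:
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();	// Stalled: waiting beats allocating.
 *		kfree(p);
 *	} else {
 *		queue_for_rcu_free(p);	// Hypothetical helper using call_rcu().
 *	}
 */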

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
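
/*
 * This knob is exposed at runtime as the kernel.panic_on_rcu_stall sysctl
 * (/proc/sys/kernel/panic_on_rcu_stall, 0 or 1).
 */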

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
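
/*
 * Why ULONG_MAX / 2?  The wraparound-safe time comparisons (time_after()
 * and the ULONG_CMP_*() macros) interpret differences as signed, so
 * jiffies + ULONG_MAX / 2 is about the farthest future moment they can
 * still distinguish from the past.
 */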

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
				"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}
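
/*
 * Decoding the four flag characters printed above: the first three are
 * '.' when the CPU is online, in ->qsmaskinit, and in ->qsmaskinitnext
 * ('O', 'o' and 'N' respectively when not), and the fourth is '.' if the
 * stall-check irq_work ran during this grace period, '!' if it did not,
 * a digit counting grace periods if one is still pending, or '?' on
 * kernels without CONFIG_IRQ_WORK.
 */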

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - gps),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_stall_is_suppressed() && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (rcu_cpu_stall_ftrace_dump)
			rcu_ftrace_dump(DUMP_ALL);
	}
}
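
/*
 * Of the two reporting paths above, the first fires when this CPU is
 * itself blocking the grace period (its bit is still set in ->qsmask),
 * and the second fires RCU_STALL_RAT_DELAY jiffies later, giving the
 * stalled CPU first crack at dumping its own stack.  Either way, the
 * cmpxchg() on ->jiffies_stall ensures that only one CPU reports per
 * stall-timeout period.
 */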

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
			(long)data_race(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}
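
/*
 * The checks above are repeated three times (lockless, under the leaf
 * rcu_node ->lock, and again under the root ->lock) so that the eventual
 * WARN_ON(1) fires only if no grace-period progress occurred in the
 * meantime, and the atomic_xchg() of "warned" limits the splat to once
 * per boot.
 */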

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
v5.14.15
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
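
/*
 * The counter above defers the panic: with the kernel.panic_on_rcu_stall
 * sysctl set to 1 and kernel.max_rcu_stall_to_panic set to 3, for example,
 * the system panics on the third stall warning rather than the first.
 */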

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
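
/*
 * Unlike the v5.9 version earlier in this listing, this variant snapshots
 * up to ARRAY_SIZE(ts) task references under ->lock and does the printing
 * only after dropping the lock, bounding both the lock hold time and the
 * number of tasks reported.
 */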

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
				"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->__state : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else  {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       gpk->__state);
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - gps),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (READ_ONCE(rnp->qsmask)) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
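
/*
 * As of this kernel series, the expected caller of rcu_check_boost_fail()
 * is rcutorture, which uses it to avoid charging quiescent-state holdouts
 * and vCPU preemption against RCU priority boosting.
 */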

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	js = j - data_race(rcu_state.gp_start);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_max),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
		    !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
			data_race(rnp->qsmask),
			".b"[!!data_race(rnp->boost_kthread_task)],
			".B"[!!data_race(rnp->boost_tasks)],
			".E"[!!data_race(rnp->exp_tasks)],
			".G"[!!data_race(rnp->gp_tasks)],
			data_race(rnp->n_boosts));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
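
/*
 * Usage note: assuming the usual "rcutree." module-parameter prefix used
 * by tree.c, booting with rcutree.sysrq_rcu=1 (the parameter is mode 0444,
 * so read-only at runtime) lets "echo y > /proc/sysrq-trigger" or the
 * Alt-SysRq-y chord dump the RCU tree via show_rcu_gp_kthreads().
 */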