v6.2
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * RCU CPU stall warnings for normal RCU grace periods
   4 *
   5 * Copyright IBM Corporation, 2019
   6 *
   7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
   8 */
   9
  10#include <linux/kvm_para.h>
  11
  12//////////////////////////////////////////////////////////////////////////////
  13//
  14// Controlling CPU stall warnings, including delay calculation.
  15
  16/* panic() on RCU Stall sysctl. */
  17int sysctl_panic_on_rcu_stall __read_mostly;
  18int sysctl_max_rcu_stall_to_panic __read_mostly;
  19
  20#ifdef CONFIG_PROVE_RCU
  21#define RCU_STALL_DELAY_DELTA		(5 * HZ)
  22#else
  23#define RCU_STALL_DELAY_DELTA		0
  24#endif
  25#define RCU_STALL_MIGHT_DIV		8
  26#define RCU_STALL_MIGHT_MIN		(2 * HZ)
  27
  28int rcu_exp_jiffies_till_stall_check(void)
  29{
  30	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
  31	int exp_stall_delay_delta = 0;
  32	int till_stall_check;
  33
  34	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
  35	if (!cpu_stall_timeout)
  36		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());
  37
  38	// Limit check must be consistent with the Kconfig limits for
  39	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
  40	// The minimum clamped value is "2UL", because at least one full
  41	// tick has to be guaranteed.
  42	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 21UL * HZ);
  43
  44	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
  45		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));
  46
  47#ifdef CONFIG_PROVE_RCU
   48	/* Add an extra ~25% on top of till_stall_check. */
  49	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
  50#endif
  51
  52	return till_stall_check + exp_stall_delay_delta;
  53}
  54EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
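/*
 * Worked example of the conversion above (a sketch, assuming HZ=1000 and
 * CONFIG_PROVE_RCU=y; the 20ms value is hypothetical, not a default):
 *
 *	rcu_exp_cpu_stall_timeout = 20 milliseconds
 *	msecs_to_jiffies(20)      = 20 jiffies, already within [2, 21*HZ]
 *	exp_stall_delay_delta     = (20 * 25) / 100 + 1 = 6 jiffies
 *	return value              = 20 + 6 = 26 jiffies
 *
 * A timeout of zero instead falls back to the normal-GP value from
 * jiffies_to_msecs(rcu_jiffies_till_stall_check()).
 */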
  55
  56/* Limit-check stall timeouts specified at boottime and runtime. */
  57int rcu_jiffies_till_stall_check(void)
  58{
  59	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
  60
  61	/*
  62	 * Limit check must be consistent with the Kconfig limits
  63	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
  64	 */
  65	if (till_stall_check < 3) {
  66		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
  67		till_stall_check = 3;
  68	} else if (till_stall_check > 300) {
  69		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
  70		till_stall_check = 300;
  71	}
  72	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
  73}
  74EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
  75
  76/**
  77 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
  78 *
  79 * Returns @true if the current grace period is sufficiently old that
  80 * it is reasonable to assume that it might be stalled.  This can be
  81 * useful when deciding whether to allocate memory to enable RCU-mediated
  82 * freeing on the one hand or just invoking synchronize_rcu() on the other.
  83 * The latter is preferable when the grace period is stalled.
  84 *
  85 * Note that sampling of the .gp_start and .gp_seq fields must be done
  86 * carefully to avoid false positives at the beginnings and ends of
  87 * grace periods.
  88 */
  89bool rcu_gp_might_be_stalled(void)
  90{
  91	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
  92	unsigned long j = jiffies;
  93
  94	if (d < RCU_STALL_MIGHT_MIN)
  95		d = RCU_STALL_MIGHT_MIN;
  96	smp_mb(); // jiffies before .gp_seq to avoid false positives.
  97	if (!rcu_gp_in_progress())
  98		return false;
   99	// Long delays at this point avoid false positives, but a delay
 100	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
 101	smp_mb(); // .gp_seq before second .gp_start
 102	// And ditto here.
 103	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
 104}
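/*
 * Hypothetical caller sketch showing the trade-off described above; the
 * names my_obj, my_free_cb, and the rh field are assumptions made purely
 * for illustration and do not appear elsewhere in this file:
 *
 *	static void my_release(struct my_obj *p)
 *	{
 *		if (rcu_gp_might_be_stalled()) {
 *			synchronize_rcu();		// GP looks stalled: wait, then free directly.
 *			kfree(p);
 *		} else {
 *			call_rcu(&p->rh, my_free_cb);	// GP looks healthy: defer the free.
 *		}
 *	}
 */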
 105
 106/* Don't do RCU CPU stall warnings during long sysrq printouts. */
 107void rcu_sysrq_start(void)
 108{
 109	if (!rcu_cpu_stall_suppress)
 110		rcu_cpu_stall_suppress = 2;
 111}
 112
 113void rcu_sysrq_end(void)
 114{
 115	if (rcu_cpu_stall_suppress == 2)
 116		rcu_cpu_stall_suppress = 0;
 117}
 118
 119/* Don't print RCU CPU stall warnings during a kernel panic. */
 120static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
 121{
 122	rcu_cpu_stall_suppress = 1;
 123	return NOTIFY_DONE;
 124}
 125
 126static struct notifier_block rcu_panic_block = {
 127	.notifier_call = rcu_panic,
 128};
 129
 130static int __init check_cpu_stall_init(void)
 131{
 132	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
 133	return 0;
 134}
 135early_initcall(check_cpu_stall_init);
 136
 137/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
 138static void panic_on_rcu_stall(void)
 139{
 140	static int cpu_stall;
 141
 142	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
 143		return;
 144
 145	if (sysctl_panic_on_rcu_stall)
 146		panic("RCU Stall\n");
 147}
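/*
 * For example (hypothetical sysctl settings, not defaults): with
 * sysctl_panic_on_rcu_stall = 1 and sysctl_max_rcu_stall_to_panic = 3,
 * the first two stall warnings only print and the third one panics.
 */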
 148
 149/**
 150 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 151 *
 152 * The caller must disable hard irqs.
 153 */
 154void rcu_cpu_stall_reset(void)
 155{
 156	WRITE_ONCE(rcu_state.jiffies_stall,
 157		   jiffies + rcu_jiffies_till_stall_check());
 158}
 159
 160//////////////////////////////////////////////////////////////////////////////
 161//
 162// Interaction with RCU grace periods
 163
 164/* Start of new grace period, so record stall time (and forcing times). */
 165static void record_gp_stall_check_time(void)
 166{
 167	unsigned long j = jiffies;
 168	unsigned long j1;
 169
 170	WRITE_ONCE(rcu_state.gp_start, j);
 171	j1 = rcu_jiffies_till_stall_check();
 172	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
 173	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
 174	rcu_state.jiffies_resched = j + j1 / 2;
 175	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
 176}
 177
 178/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
 179static void zero_cpu_stall_ticks(struct rcu_data *rdp)
 180{
 181	rdp->ticks_this_gp = 0;
 182	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
 183	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
 184}
 185
 186/*
 187 * If too much time has passed in the current grace period, and if
 188 * so configured, go kick the relevant kthreads.
 189 */
 190static void rcu_stall_kick_kthreads(void)
 191{
 192	unsigned long j;
 193
 194	if (!READ_ONCE(rcu_kick_kthreads))
 195		return;
 196	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
 197	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
 198	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
 199		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
 200			  rcu_state.name);
 201		rcu_ftrace_dump(DUMP_ALL);
 202		wake_up_process(rcu_state.gp_kthread);
 203		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
 204	}
 205}
 206
 207/*
 208 * Handler for the irq_work request posted about halfway into the RCU CPU
 209 * stall timeout, and used to detect excessive irq disabling.  Set state
 210 * appropriately, but just complain if there is unexpected state on entry.
 211 */
 212static void rcu_iw_handler(struct irq_work *iwp)
 213{
 214	struct rcu_data *rdp;
 215	struct rcu_node *rnp;
 216
 217	rdp = container_of(iwp, struct rcu_data, rcu_iw);
 218	rnp = rdp->mynode;
 219	raw_spin_lock_rcu_node(rnp);
 220	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
 221		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 222		rdp->rcu_iw_pending = false;
 223	}
 224	raw_spin_unlock_rcu_node(rnp);
 225}
 226
 227//////////////////////////////////////////////////////////////////////////////
 228//
 229// Printing RCU CPU stall warnings
 230
 231#ifdef CONFIG_PREEMPT_RCU
 232
 233/*
 234 * Dump detailed information for all tasks blocking the current RCU
 235 * grace period on the specified rcu_node structure.
 236 */
 237static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 238{
 239	unsigned long flags;
 240	struct task_struct *t;
 241
 242	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 243	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 244		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 245		return;
 246	}
 247	t = list_entry(rnp->gp_tasks->prev,
 248		       struct task_struct, rcu_node_entry);
 249	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 250		/*
 251		 * We could be printing a lot while holding a spinlock.
 252		 * Avoid triggering hard lockup.
 253		 */
 254		touch_nmi_watchdog();
 255		sched_show_task(t);
 256	}
 257	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 258}
 259
 260// Communicate task state back to the RCU CPU stall warning request.
 261struct rcu_stall_chk_rdr {
 262	int nesting;
 263	union rcu_special rs;
 264	bool on_blkd_list;
 265};
 266
 267/*
 268 * Report out the state of a not-running task that is stalling the
 269 * current RCU grace period.
 270 */
 271static int check_slow_task(struct task_struct *t, void *arg)
 272{
 273	struct rcu_stall_chk_rdr *rscrp = arg;
 274
 275	if (task_curr(t))
 276		return -EBUSY; // It is running, so decline to inspect it.
 277	rscrp->nesting = t->rcu_read_lock_nesting;
 278	rscrp->rs = t->rcu_read_unlock_special;
 279	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
 280	return 0;
 281}
 282
 283/*
 284 * Scan the current list of tasks blocked within RCU read-side critical
 285 * sections, printing out the tid of each of the first few of them.
 286 */
 287static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 288	__releases(rnp->lock)
 289{
 290	int i = 0;
 291	int ndetected = 0;
 292	struct rcu_stall_chk_rdr rscr;
 293	struct task_struct *t;
 294	struct task_struct *ts[8];
 295
 296	lockdep_assert_irqs_disabled();
 297	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 298		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 299		return 0;
 300	}
 301	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
 302	       rnp->level, rnp->grplo, rnp->grphi);
 303	t = list_entry(rnp->gp_tasks->prev,
 304		       struct task_struct, rcu_node_entry);
 305	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 306		get_task_struct(t);
 307		ts[i++] = t;
 308		if (i >= ARRAY_SIZE(ts))
 309			break;
 310	}
 311	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 312	while (i) {
 313		t = ts[--i];
 314		if (task_call_func(t, check_slow_task, &rscr))
 315			pr_cont(" P%d", t->pid);
 316		else
 317			pr_cont(" P%d/%d:%c%c%c%c",
 318				t->pid, rscr.nesting,
 319				".b"[rscr.rs.b.blocked],
 320				".q"[rscr.rs.b.need_qs],
 321				".e"[rscr.rs.b.exp_hint],
 322				".l"[rscr.on_blkd_list]);
 323		lockdep_assert_irqs_disabled();
 324		put_task_struct(t);
 325		ndetected++;
 326	}
 327	pr_cont("\n");
 328	return ndetected;
 329}
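/*
 * Reading the per-task output above (a hypothetical line, not captured
 * from a real stall): " P1423/1:b.el" would mean PID 1423 blocked at
 * rcu_read_lock() nesting depth 1, with ->rcu_read_unlock_special.b.blocked
 * set ('b'), need_qs clear ('.'), exp_hint set ('e'), and the task still
 * queued on ->blkd_tasks ('l').  A bare " P1423" means the task was
 * running when sampled, so its state was not inspected.
 */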
 330
 331#else /* #ifdef CONFIG_PREEMPT_RCU */
 332
 333/*
 334 * Because preemptible RCU does not exist, we never have to check for
 335 * tasks blocked within RCU read-side critical sections.
 336 */
 337static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 338{
 339}
 340
 341/*
 342 * Because preemptible RCU does not exist, we never have to check for
 343 * tasks blocked within RCU read-side critical sections.
 344 */
 345static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 346	__releases(rnp->lock)
 347{
 348	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 349	return 0;
 350}
 351#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 352
 353/*
 354 * Dump stacks of all tasks running on stalled CPUs.  First try using
 355 * NMIs, but fall back to manual remote stack tracing on architectures
 356 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 357 * traces are more accurate because they are printed by the target CPU.
 358 */
 359static void rcu_dump_cpu_stacks(void)
 360{
 361	int cpu;
 362	unsigned long flags;
 363	struct rcu_node *rnp;
 364
 365	rcu_for_each_leaf_node(rnp) {
 366		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 367		for_each_leaf_node_possible_cpu(rnp, cpu)
 368			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 369				if (cpu_is_offline(cpu))
 370					pr_err("Offline CPU %d blocking current GP.\n", cpu);
 371				else
 372					dump_cpu_task(cpu);
 373			}
 374		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 375	}
 376}
 377
 378static const char * const gp_state_names[] = {
 379	[RCU_GP_IDLE] = "RCU_GP_IDLE",
 380	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
 381	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
 382	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
 383	[RCU_GP_INIT] = "RCU_GP_INIT",
 384	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
 385	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
 386	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
 387	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
 388};
 389
 390/*
 391 * Convert a ->gp_state value to a character string.
 392 */
 393static const char *gp_state_getname(short gs)
 394{
 395	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
 396		return "???";
 397	return gp_state_names[gs];
 398}
 399
 400/* Is the RCU grace-period kthread being starved of CPU time? */
 401static bool rcu_is_gp_kthread_starving(unsigned long *jp)
 402{
 403	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);
 404
 405	if (jp)
 406		*jp = j;
 407	return j > 2 * HZ;
 408}
 409
 410static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
 411{
 412	int cpu;
 413	struct task_struct *rcuc;
 414	unsigned long j;
 415
 416	rcuc = rdp->rcu_cpu_kthread_task;
 417	if (!rcuc)
 418		return false;
 419
 420	cpu = task_cpu(rcuc);
 421	if (cpu_is_offline(cpu) || idle_cpu(cpu))
 422		return false;
 423
 424	j = jiffies - READ_ONCE(rdp->rcuc_activity);
 425
 426	if (jp)
 427		*jp = j;
 428	return j > 2 * HZ;
 429}
 430
 431/*
 432 * Print out diagnostic information for the specified stalled CPU.
 433 *
 434 * If the specified CPU is aware of the current RCU grace period, then
 435 * print the number of scheduling clock interrupts the CPU has taken
 436 * during the time that it has been aware.  Otherwise, print the number
 437 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 438 * if the CPU was aware of the previous grace period.
 439 *
 440 * Also print out idle info.
 441 */
 442static void print_cpu_stall_info(int cpu)
 443{
 444	unsigned long delta;
 445	bool falsepositive;
 446	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 447	char *ticks_title;
 448	unsigned long ticks_value;
 449	bool rcuc_starved;
 450	unsigned long j;
 451	char buf[32];
 452
 453	/*
 454	 * We could be printing a lot while holding a spinlock.  Avoid
 455	 * triggering hard lockup.
 456	 */
 457	touch_nmi_watchdog();
 458
 459	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
 460	if (ticks_value) {
 461		ticks_title = "GPs behind";
 462	} else {
 463		ticks_title = "ticks this GP";
 464		ticks_value = rdp->ticks_this_gp;
 465	}
 466	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
 467	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
 468			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
 469	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
 470	if (rcuc_starved)
 471		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
 472	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
 473	       cpu,
 474	       "O."[!!cpu_online(cpu)],
 475	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
 476	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
 477	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
 478			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
 479				"!."[!delta],
 480	       ticks_value, ticks_title,
 481	       rcu_dynticks_snap(cpu) & 0xffff,
 482	       ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
 483	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 484	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
 485	       rcuc_starved ? buf : "",
 486	       falsepositive ? " (false positive?)" : "");
 487}
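/*
 * Hypothetical example of the line printed above (all values invented for
 * illustration): "3-...0: (21 ticks this GP) idle=04f4/1/0x4000000000000000
 * softirq=512/512 fqs=0" would describe CPU 3 that is online ('.'),
 * present in both ->qsmaskinit and ->qsmaskinitnext ('.' '.'), has an
 * rcu_iw pending with a grace-period delta of 0 ('0'), has taken 21
 * scheduling-clock ticks while aware of the current GP, shows its
 * dynticks/nesting snapshot after "idle=", has identical softirq counts
 * at GP start and now, and has seen no force-quiescent-state activity
 * since the GP began.
 */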
 488
 489/* Complain about starvation of grace-period kthread.  */
 490static void rcu_check_gp_kthread_starvation(void)
 491{
 492	int cpu;
 493	struct task_struct *gpk = rcu_state.gp_kthread;
 494	unsigned long j;
 495
 496	if (rcu_is_gp_kthread_starving(&j)) {
 497		cpu = gpk ? task_cpu(gpk) : -1;
 498		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
 499		       rcu_state.name, j,
 500		       (long)rcu_seq_current(&rcu_state.gp_seq),
 501		       data_race(READ_ONCE(rcu_state.gp_flags)),
 502		       gp_state_getname(rcu_state.gp_state),
 503		       data_race(READ_ONCE(rcu_state.gp_state)),
 504		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
 505		if (gpk) {
 506			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
 507			pr_err("RCU grace-period kthread stack dump:\n");
 508			sched_show_task(gpk);
 509			if (cpu >= 0) {
 510				if (cpu_is_offline(cpu)) {
 511					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
 512				} else  {
 513					pr_err("Stack dump where RCU GP kthread last ran:\n");
 514					dump_cpu_task(cpu);
 515				}
 516			}
 517			wake_up_process(gpk);
 518		}
 519	}
 520}
 521
 522/* Complain about missing wakeups from expired fqs wait timer */
 523static void rcu_check_gp_kthread_expired_fqs_timer(void)
 524{
 525	struct task_struct *gpk = rcu_state.gp_kthread;
 526	short gp_state;
 527	unsigned long jiffies_fqs;
 528	int cpu;
 529
 530	/*
 531	 * Order reads of .gp_state and .jiffies_force_qs.
 532	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
 533	 */
 534	gp_state = smp_load_acquire(&rcu_state.gp_state);
 535	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);
 536
 537	if (gp_state == RCU_GP_WAIT_FQS &&
 538	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
 539	    gpk && !READ_ONCE(gpk->on_rq)) {
 540		cpu = task_cpu(gpk);
 541		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
 542		       rcu_state.name, (jiffies - jiffies_fqs),
 543		       (long)rcu_seq_current(&rcu_state.gp_seq),
 544		       data_race(rcu_state.gp_flags),
 545		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
 546		       data_race(READ_ONCE(gpk->__state)));
 547		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
 548		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
 549	}
 550}
 551
 552static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 553{
 554	int cpu;
 555	unsigned long flags;
 556	unsigned long gpa;
 557	unsigned long j;
 558	int ndetected = 0;
 559	struct rcu_node *rnp;
 560	long totqlen = 0;
 561
 562	lockdep_assert_irqs_disabled();
 563
 564	/* Kick and suppress, if so configured. */
 565	rcu_stall_kick_kthreads();
 566	if (rcu_stall_is_suppressed())
 567		return;
 568
 569	/*
 570	 * OK, time to rat on our buddy...
 571	 * See Documentation/RCU/stallwarn.rst for info on how to debug
 572	 * RCU CPU stall warnings.
 573	 */
 574	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
 575	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
 576	rcu_for_each_leaf_node(rnp) {
 577		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 578		if (rnp->qsmask != 0) {
 579			for_each_leaf_node_possible_cpu(rnp, cpu)
 580				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 581					print_cpu_stall_info(cpu);
 582					ndetected++;
 583				}
 584		}
 585		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
 586		lockdep_assert_irqs_disabled();
 587	}
 588
 589	for_each_possible_cpu(cpu)
 590		totqlen += rcu_get_n_cbs_cpu(cpu);
 591	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
 592	       smp_processor_id(), (long)(jiffies - gps),
 593	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
 594	if (ndetected) {
 595		rcu_dump_cpu_stacks();
 596
 597		/* Complain about tasks blocking the grace period. */
 598		rcu_for_each_leaf_node(rnp)
 599			rcu_print_detail_task_stall_rnp(rnp);
 600	} else {
 601		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
 602			pr_err("INFO: Stall ended before state dump start\n");
 603		} else {
 604			j = jiffies;
 605			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
 606			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 607			       rcu_state.name, j - gpa, j, gpa,
 608			       data_race(READ_ONCE(jiffies_till_next_fqs)),
 609			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
 610		}
 611	}
 612	/* Rewrite if needed in case of slow consoles. */
 613	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
 614		WRITE_ONCE(rcu_state.jiffies_stall,
 615			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 616
 617	rcu_check_gp_kthread_expired_fqs_timer();
 618	rcu_check_gp_kthread_starvation();
 619
 620	panic_on_rcu_stall();
 621
 622	rcu_force_quiescent_state();  /* Kick them all. */
 623}
 624
 625static void print_cpu_stall(unsigned long gps)
 626{
 627	int cpu;
 628	unsigned long flags;
 629	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 630	struct rcu_node *rnp = rcu_get_root();
 631	long totqlen = 0;
 632
 633	lockdep_assert_irqs_disabled();
 634
 635	/* Kick and suppress, if so configured. */
 636	rcu_stall_kick_kthreads();
 637	if (rcu_stall_is_suppressed())
 638		return;
 639
 640	/*
 641	 * OK, time to rat on ourselves...
 642	 * See Documentation/RCU/stallwarn.rst for info on how to debug
 643	 * RCU CPU stall warnings.
 644	 */
 645	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
 646	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
 647	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
 648	print_cpu_stall_info(smp_processor_id());
 649	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 650	for_each_possible_cpu(cpu)
 651		totqlen += rcu_get_n_cbs_cpu(cpu);
 652	pr_cont("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
 653		jiffies - gps,
 654		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
 655
 656	rcu_check_gp_kthread_expired_fqs_timer();
 657	rcu_check_gp_kthread_starvation();
 658
 659	rcu_dump_cpu_stacks();
 660
 661	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 662	/* Rewrite if needed in case of slow consoles. */
 663	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
 664		WRITE_ONCE(rcu_state.jiffies_stall,
 665			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 666	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 667
 668	panic_on_rcu_stall();
 669
 670	/*
 671	 * Attempt to revive the RCU machinery by forcing a context switch.
 672	 *
 673	 * A context switch would normally allow the RCU state machine to make
 674	 * progress and it could be we're stuck in kernel space without context
 675	 * switches for an entirely unreasonable amount of time.
 676	 */
 677	set_tsk_need_resched(current);
 678	set_preempt_need_resched();
 679}
 680
 681static void check_cpu_stall(struct rcu_data *rdp)
 682{
 683	bool didstall = false;
 684	unsigned long gs1;
 685	unsigned long gs2;
 686	unsigned long gps;
 687	unsigned long j;
 688	unsigned long jn;
 689	unsigned long js;
 690	struct rcu_node *rnp;
 691
 692	lockdep_assert_irqs_disabled();
 693	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
 694	    !rcu_gp_in_progress())
 695		return;
 696	rcu_stall_kick_kthreads();
 697	j = jiffies;
 698
 699	/*
 700	 * Lots of memory barriers to reject false positives.
 701	 *
 702	 * The idea is to pick up rcu_state.gp_seq, then
 703	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
 704	 * another copy of rcu_state.gp_seq.  These values are updated in
 705	 * the opposite order with memory barriers (or equivalent) during
 706	 * grace-period initialization and cleanup.  Now, a false positive
  707	 * can occur if we get a new value of rcu_state.gp_start and an old
 708	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
 709	 * the only way that this can happen is if one grace period ends
 710	 * and another starts between these two fetches.  This is detected
 711	 * by comparing the second fetch of rcu_state.gp_seq with the
 712	 * previous fetch from rcu_state.gp_seq.
 713	 *
 714	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
 715	 * and rcu_state.gp_start suffice to forestall false positives.
 716	 */
 717	gs1 = READ_ONCE(rcu_state.gp_seq);
 718	smp_rmb(); /* Pick up ->gp_seq first... */
 719	js = READ_ONCE(rcu_state.jiffies_stall);
 720	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
 721	gps = READ_ONCE(rcu_state.gp_start);
 722	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
 723	gs2 = READ_ONCE(rcu_state.gp_seq);
 724	if (gs1 != gs2 ||
 725	    ULONG_CMP_LT(j, js) ||
 726	    ULONG_CMP_GE(gps, js))
 727		return; /* No stall or GP completed since entering function. */
 728	rnp = rdp->mynode;
 729	jn = jiffies + ULONG_MAX / 2;
 730	if (rcu_gp_in_progress() &&
 731	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
 732	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 733
 734		/*
 735		 * If a virtual machine is stopped by the host it can look to
 736		 * the watchdog like an RCU stall. Check to see if the host
 737		 * stopped the vm.
 738		 */
 739		if (kvm_check_and_clear_guest_paused())
 740			return;
 741
 742		/* We haven't checked in, so go dump stack. */
 743		print_cpu_stall(gps);
 744		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
 745			rcu_ftrace_dump(DUMP_ALL);
 746		didstall = true;
 747
 748	} else if (rcu_gp_in_progress() &&
 749		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
 750		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 751
 752		/*
 753		 * If a virtual machine is stopped by the host it can look to
 754		 * the watchdog like an RCU stall. Check to see if the host
 755		 * stopped the vm.
 756		 */
 757		if (kvm_check_and_clear_guest_paused())
 758			return;
 759
 760		/* They had a few time units to dump stack, so complain. */
 761		print_other_cpu_stall(gs2, gps);
 762		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
 763			rcu_ftrace_dump(DUMP_ALL);
 764		didstall = true;
 765	}
 766	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
 767		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 768		WRITE_ONCE(rcu_state.jiffies_stall, jn);
 769	}
 770}
 771
 772//////////////////////////////////////////////////////////////////////////////
 773//
  774// RCU forward-progress mechanisms, including that of callback invocation.
 775
 776
 777/*
 778 * Check to see if a failure to end RCU priority inversion was due to
 779 * a CPU not passing through a quiescent state.  When this happens, there
 780 * is nothing that RCU priority boosting can do to help, so we shouldn't
 781 * count this as an RCU priority boosting failure.  A return of true says
 782 * RCU priority boosting is to blame, and false says otherwise.  If false
 783 * is returned, the first of the CPUs to blame is stored through cpup.
 784 * If there was no CPU blocking the current grace period, but also nothing
 785 * in need of being boosted, *cpup is set to -1.  This can happen in case
 786 * of vCPU preemption while the last CPU is reporting its quiscent state,
  787 * of vCPU preemption while the last CPU is reporting its quiescent state,
 788 *
 789 * If cpup is NULL, then a lockless quick check is carried out, suitable
 790 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 791 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 792 */
 793bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
 794{
 795	bool atb = false;
 796	int cpu;
 797	unsigned long flags;
 798	struct rcu_node *rnp;
 799
 800	rcu_for_each_leaf_node(rnp) {
 801		if (!cpup) {
 802			if (data_race(READ_ONCE(rnp->qsmask))) {
 803				return false;
 804			} else {
 805				if (READ_ONCE(rnp->gp_tasks))
 806					atb = true;
 807				continue;
 808			}
 809		}
 810		*cpup = -1;
 811		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 812		if (rnp->gp_tasks)
 813			atb = true;
 814		if (!rnp->qsmask) {
 815			// No CPUs without quiescent states for this rnp.
 816			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 817			continue;
 818		}
 819		// Find the first holdout CPU.
 820		for_each_leaf_node_possible_cpu(rnp, cpu) {
 821			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
 822				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 823				*cpup = cpu;
 824				return false;
 825			}
 826		}
 827		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 828	}
 829	// Can't blame CPUs, so must blame RCU priority boosting.
 830	return atb;
 831}
 832EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
 833
 834/*
 835 * Show the state of the grace-period kthreads.
 836 */
 837void show_rcu_gp_kthreads(void)
 838{
 839	unsigned long cbs = 0;
 840	int cpu;
 841	unsigned long j;
 842	unsigned long ja;
 843	unsigned long jr;
 844	unsigned long js;
 845	unsigned long jw;
 846	struct rcu_data *rdp;
 847	struct rcu_node *rnp;
 848	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 849
 850	j = jiffies;
 851	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
 852	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
 853	js = j - data_race(READ_ONCE(rcu_state.gp_start));
 854	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
 855	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
 856		rcu_state.name, gp_state_getname(rcu_state.gp_state),
 857		data_race(READ_ONCE(rcu_state.gp_state)),
 858		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
 859		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
 860		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
 861		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
 862		data_race(READ_ONCE(rcu_state.gp_max)),
 863		data_race(READ_ONCE(rcu_state.gp_flags)));
 864	rcu_for_each_node_breadth_first(rnp) {
 865		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
 866		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
 867		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
 868			continue;
 869		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
 870			rnp->grplo, rnp->grphi,
 871			(long)data_race(READ_ONCE(rnp->gp_seq)),
 872			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
 873			data_race(READ_ONCE(rnp->qsmask)),
 874			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
 875			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
 876			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
 877			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
 878			data_race(READ_ONCE(rnp->n_boosts)));
 879		if (!rcu_is_leaf_node(rnp))
 880			continue;
 881		for_each_leaf_node_possible_cpu(rnp, cpu) {
 882			rdp = per_cpu_ptr(&rcu_data, cpu);
 883			if (READ_ONCE(rdp->gpwrap) ||
 884			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
 885					 READ_ONCE(rdp->gp_seq_needed)))
 886				continue;
 887			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
 888				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
 889		}
 890	}
 891	for_each_possible_cpu(cpu) {
 892		rdp = per_cpu_ptr(&rcu_data, cpu);
 893		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
 894		if (rcu_segcblist_is_offloaded(&rdp->cblist))
 895			show_rcu_nocb_state(rdp);
 896	}
 897	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
 898	show_rcu_tasks_gp_kthreads();
 899}
 900EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 901
 902/*
 903 * This function checks for grace-period requests that fail to motivate
 904 * RCU to come out of its idle mode.
 905 */
 906static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 907				     const unsigned long gpssdelay)
 908{
 909	unsigned long flags;
 910	unsigned long j;
 911	struct rcu_node *rnp_root = rcu_get_root();
 912	static atomic_t warned = ATOMIC_INIT(0);
 913
 914	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
 915	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
 916			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 917	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
 918		return;
 919	j = jiffies; /* Expensive access, and in common case don't get here. */
 920	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 921	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 922	    atomic_read(&warned))
 923		return;
 924
 925	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 926	j = jiffies;
 927	if (rcu_gp_in_progress() ||
 928	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
 929			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 930	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 931	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 932	    atomic_read(&warned)) {
 933		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 934		return;
 935	}
 936	/* Hold onto the leaf lock to make others see warned==1. */
 937
 938	if (rnp_root != rnp)
 939		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 940	j = jiffies;
 941	if (rcu_gp_in_progress() ||
 942	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
 943			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 944	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 945	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 946	    atomic_xchg(&warned, 1)) {
 947		if (rnp_root != rnp)
 948			/* irqs remain disabled. */
 949			raw_spin_unlock_rcu_node(rnp_root);
 950		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 951		return;
 952	}
 953	WARN_ON(1);
 954	if (rnp_root != rnp)
 955		raw_spin_unlock_rcu_node(rnp_root);
 956	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 957	show_rcu_gp_kthreads();
 958}
 959
 960/*
 961 * Do a forward-progress check for rcutorture.  This is normally invoked
 962 * due to an OOM event.  The argument "j" gives the time period during
 963 * which rcutorture would like progress to have been made.
 964 */
 965void rcu_fwd_progress_check(unsigned long j)
 966{
 967	unsigned long cbs;
 968	int cpu;
 969	unsigned long max_cbs = 0;
 970	int max_cpu = -1;
 971	struct rcu_data *rdp;
 972
 973	if (rcu_gp_in_progress()) {
 974		pr_info("%s: GP age %lu jiffies\n",
 975			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
 976		show_rcu_gp_kthreads();
 977	} else {
 978		pr_info("%s: Last GP end %lu jiffies ago\n",
 979			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
 980		preempt_disable();
 981		rdp = this_cpu_ptr(&rcu_data);
 982		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
 983		preempt_enable();
 984	}
 985	for_each_possible_cpu(cpu) {
 986		cbs = rcu_get_n_cbs_cpu(cpu);
 987		if (!cbs)
 988			continue;
 989		if (max_cpu < 0)
 990			pr_info("%s: callbacks", __func__);
 991		pr_cont(" %d: %lu", cpu, cbs);
 992		if (cbs <= max_cbs)
 993			continue;
 994		max_cbs = cbs;
 995		max_cpu = cpu;
 996	}
 997	if (max_cpu >= 0)
 998		pr_cont("\n");
 999}
1000EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
1001
1002/* Commandeer a sysrq key to dump RCU's tree. */
1003static bool sysrq_rcu;
1004module_param(sysrq_rcu, bool, 0444);
1005
1006/* Dump grace-period-request information due to commandeered sysrq. */
1007static void sysrq_show_rcu(int key)
1008{
1009	show_rcu_gp_kthreads();
1010}
1011
1012static const struct sysrq_key_op sysrq_rcudump_op = {
1013	.handler = sysrq_show_rcu,
1014	.help_msg = "show-rcu(y)",
1015	.action_msg = "Show RCU tree",
1016	.enable_mask = SYSRQ_ENABLE_DUMP,
1017};
1018
1019static int __init rcu_sysrq_init(void)
1020{
1021	if (sysrq_rcu)
1022		return register_sysrq_key('y', &sysrq_rcudump_op);
1023	return 0;
1024}
1025early_initcall(rcu_sysrq_init);
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * RCU CPU stall warnings for normal RCU grace periods
  4 *
  5 * Copyright IBM Corporation, 2019
  6 *
  7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
  8 */
  9
 10#include <linux/kvm_para.h>
 11
 12//////////////////////////////////////////////////////////////////////////////
 13//
 14// Controlling CPU stall warnings, including delay calculation.
 15
 16/* panic() on RCU Stall sysctl. */
 17int sysctl_panic_on_rcu_stall __read_mostly;
 18int sysctl_max_rcu_stall_to_panic __read_mostly;
 19
 20#ifdef CONFIG_PROVE_RCU
 21#define RCU_STALL_DELAY_DELTA		(5 * HZ)
 22#else
 23#define RCU_STALL_DELAY_DELTA		0
 24#endif
 25#define RCU_STALL_MIGHT_DIV		8
 26#define RCU_STALL_MIGHT_MIN		(2 * HZ)
 27
 28/* Limit-check stall timeouts specified at boottime and runtime. */
 29int rcu_jiffies_till_stall_check(void)
 30{
 31	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
 32
 33	/*
 34	 * Limit check must be consistent with the Kconfig limits
 35	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
 36	 */
 37	if (till_stall_check < 3) {
 38		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
 39		till_stall_check = 3;
 40	} else if (till_stall_check > 300) {
 41		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
 42		till_stall_check = 300;
 43	}
 44	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
 45}
 46EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
 47
 48/**
 49 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 50 *
 51 * Returns @true if the current grace period is sufficiently old that
 52 * it is reasonable to assume that it might be stalled.  This can be
 53 * useful when deciding whether to allocate memory to enable RCU-mediated
 54 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 55 * The latter is preferable when the grace period is stalled.
 56 *
 57 * Note that sampling of the .gp_start and .gp_seq fields must be done
 58 * carefully to avoid false positives at the beginnings and ends of
 59 * grace periods.
 60 */
 61bool rcu_gp_might_be_stalled(void)
 62{
 63	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
 64	unsigned long j = jiffies;
 65
 66	if (d < RCU_STALL_MIGHT_MIN)
 67		d = RCU_STALL_MIGHT_MIN;
 68	smp_mb(); // jiffies before .gp_seq to avoid false positives.
 69	if (!rcu_gp_in_progress())
 70		return false;
  71	// Long delays at this point avoid false positives, but a delay
 72	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
 73	smp_mb(); // .gp_seq before second .gp_start
 74	// And ditto here.
 75	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
 76}
 77
 78/* Don't do RCU CPU stall warnings during long sysrq printouts. */
 79void rcu_sysrq_start(void)
 80{
 81	if (!rcu_cpu_stall_suppress)
 82		rcu_cpu_stall_suppress = 2;
 83}
 84
 85void rcu_sysrq_end(void)
 86{
 87	if (rcu_cpu_stall_suppress == 2)
 88		rcu_cpu_stall_suppress = 0;
 89}
 90
 91/* Don't print RCU CPU stall warnings during a kernel panic. */
 92static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
 93{
 94	rcu_cpu_stall_suppress = 1;
 95	return NOTIFY_DONE;
 96}
 97
 98static struct notifier_block rcu_panic_block = {
 99	.notifier_call = rcu_panic,
100};
101
102static int __init check_cpu_stall_init(void)
103{
104	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
105	return 0;
106}
107early_initcall(check_cpu_stall_init);
108
109/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
110static void panic_on_rcu_stall(void)
111{
112	static int cpu_stall;
113
114	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
115		return;
116
117	if (sysctl_panic_on_rcu_stall)
118		panic("RCU Stall\n");
119}
120
121/**
122 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
123 *
124 * Set the stall-warning timeout way off into the future, thus preventing
125 * any RCU CPU stall-warning messages from appearing in the current set of
126 * RCU grace periods.
127 *
128 * The caller must disable hard irqs.
129 */
130void rcu_cpu_stall_reset(void)
131{
132	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
133}
134
135//////////////////////////////////////////////////////////////////////////////
136//
137// Interaction with RCU grace periods
138
139/* Start of new grace period, so record stall time (and forcing times). */
140static void record_gp_stall_check_time(void)
141{
142	unsigned long j = jiffies;
143	unsigned long j1;
144
145	WRITE_ONCE(rcu_state.gp_start, j);
146	j1 = rcu_jiffies_till_stall_check();
147	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
148	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
149	rcu_state.jiffies_resched = j + j1 / 2;
150	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
151}
152
153/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
154static void zero_cpu_stall_ticks(struct rcu_data *rdp)
155{
156	rdp->ticks_this_gp = 0;
157	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
158	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
159}
160
161/*
162 * If too much time has passed in the current grace period, and if
163 * so configured, go kick the relevant kthreads.
164 */
165static void rcu_stall_kick_kthreads(void)
166{
167	unsigned long j;
168
169	if (!READ_ONCE(rcu_kick_kthreads))
170		return;
171	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
172	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
173	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
174		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
175			  rcu_state.name);
176		rcu_ftrace_dump(DUMP_ALL);
177		wake_up_process(rcu_state.gp_kthread);
178		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
179	}
180}
181
182/*
183 * Handler for the irq_work request posted about halfway into the RCU CPU
184 * stall timeout, and used to detect excessive irq disabling.  Set state
185 * appropriately, but just complain if there is unexpected state on entry.
186 */
187static void rcu_iw_handler(struct irq_work *iwp)
188{
189	struct rcu_data *rdp;
190	struct rcu_node *rnp;
191
192	rdp = container_of(iwp, struct rcu_data, rcu_iw);
193	rnp = rdp->mynode;
194	raw_spin_lock_rcu_node(rnp);
195	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
196		rdp->rcu_iw_gp_seq = rnp->gp_seq;
197		rdp->rcu_iw_pending = false;
198	}
199	raw_spin_unlock_rcu_node(rnp);
200}
201
202//////////////////////////////////////////////////////////////////////////////
203//
204// Printing RCU CPU stall warnings
205
206#ifdef CONFIG_PREEMPT_RCU
207
208/*
209 * Dump detailed information for all tasks blocking the current RCU
210 * grace period on the specified rcu_node structure.
211 */
212static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
213{
214	unsigned long flags;
215	struct task_struct *t;
216
217	raw_spin_lock_irqsave_rcu_node(rnp, flags);
218	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
219		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
220		return;
221	}
222	t = list_entry(rnp->gp_tasks->prev,
223		       struct task_struct, rcu_node_entry);
224	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
225		/*
226		 * We could be printing a lot while holding a spinlock.
227		 * Avoid triggering hard lockup.
228		 */
229		touch_nmi_watchdog();
230		sched_show_task(t);
231	}
232	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
233}
234
235// Communicate task state back to the RCU CPU stall warning request.
236struct rcu_stall_chk_rdr {
237	int nesting;
238	union rcu_special rs;
239	bool on_blkd_list;
240};
241
242/*
243 * Report out the state of a not-running task that is stalling the
244 * current RCU grace period.
245 */
246static bool check_slow_task(struct task_struct *t, void *arg)
247{
248	struct rcu_stall_chk_rdr *rscrp = arg;
249
250	if (task_curr(t))
251		return false; // It is running, so decline to inspect it.
252	rscrp->nesting = t->rcu_read_lock_nesting;
253	rscrp->rs = t->rcu_read_unlock_special;
254	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
255	return true;
256}
257
258/*
259 * Scan the current list of tasks blocked within RCU read-side critical
260 * sections, printing out the tid of each of the first few of them.
261 */
262static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
263	__releases(rnp->lock)
264{
265	int i = 0;
266	int ndetected = 0;
267	struct rcu_stall_chk_rdr rscr;
268	struct task_struct *t;
269	struct task_struct *ts[8];
270
271	lockdep_assert_irqs_disabled();
272	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
273		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
274		return 0;
275	}
276	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
277	       rnp->level, rnp->grplo, rnp->grphi);
278	t = list_entry(rnp->gp_tasks->prev,
279		       struct task_struct, rcu_node_entry);
280	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
281		get_task_struct(t);
282		ts[i++] = t;
283		if (i >= ARRAY_SIZE(ts))
284			break;
285	}
286	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
287	while (i) {
288		t = ts[--i];
289		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
290			pr_cont(" P%d", t->pid);
291		else
292			pr_cont(" P%d/%d:%c%c%c%c",
293				t->pid, rscr.nesting,
294				".b"[rscr.rs.b.blocked],
295				".q"[rscr.rs.b.need_qs],
296				".e"[rscr.rs.b.exp_hint],
297				".l"[rscr.on_blkd_list]);
298		lockdep_assert_irqs_disabled();
299		put_task_struct(t);
300		ndetected++;
301	}
302	pr_cont("\n");
303	return ndetected;
304}
305
306#else /* #ifdef CONFIG_PREEMPT_RCU */
307
308/*
309 * Because preemptible RCU does not exist, we never have to check for
310 * tasks blocked within RCU read-side critical sections.
311 */
312static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
313{
314}
315
316/*
317 * Because preemptible RCU does not exist, we never have to check for
318 * tasks blocked within RCU read-side critical sections.
319 */
320static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
321	__releases(rnp->lock)
322{
323	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
324	return 0;
325}
326#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
327
328/*
329 * Dump stacks of all tasks running on stalled CPUs.  First try using
330 * NMIs, but fall back to manual remote stack tracing on architectures
331 * that don't support NMI-based stack dumps.  The NMI-triggered stack
332 * traces are more accurate because they are printed by the target CPU.
333 */
334static void rcu_dump_cpu_stacks(void)
335{
336	int cpu;
337	unsigned long flags;
338	struct rcu_node *rnp;
339
340	rcu_for_each_leaf_node(rnp) {
341		raw_spin_lock_irqsave_rcu_node(rnp, flags);
342		for_each_leaf_node_possible_cpu(rnp, cpu)
343			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
344				if (cpu_is_offline(cpu))
345					pr_err("Offline CPU %d blocking current GP.\n", cpu);
346				else if (!trigger_single_cpu_backtrace(cpu))
347					dump_cpu_task(cpu);
348			}
349		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
350	}
351}
352
353#ifdef CONFIG_RCU_FAST_NO_HZ
354
355static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
356{
357	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
358
359	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
360		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
361		!!rdp->tick_nohz_enabled_snap);
362}
363
364#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
365
366static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
367{
368	*cp = '\0';
369}
370
371#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
372
373static const char * const gp_state_names[] = {
374	[RCU_GP_IDLE] = "RCU_GP_IDLE",
375	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
376	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
377	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
378	[RCU_GP_INIT] = "RCU_GP_INIT",
379	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
380	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
381	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
382	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
383};
384
385/*
386 * Convert a ->gp_state value to a character string.
387 */
388static const char *gp_state_getname(short gs)
389{
390	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
391		return "???";
392	return gp_state_names[gs];
393}
394
395/* Is the RCU grace-period kthread being starved of CPU time? */
396static bool rcu_is_gp_kthread_starving(unsigned long *jp)
397{
398	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);
399
400	if (jp)
401		*jp = j;
402	return j > 2 * HZ;
403}
404
405/*
406 * Print out diagnostic information for the specified stalled CPU.
407 *
408 * If the specified CPU is aware of the current RCU grace period, then
409 * print the number of scheduling clock interrupts the CPU has taken
410 * during the time that it has been aware.  Otherwise, print the number
411 * of RCU grace periods that this CPU is ignorant of, for example, "1"
412 * if the CPU was aware of the previous grace period.
413 *
414 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
415 */
416static void print_cpu_stall_info(int cpu)
417{
418	unsigned long delta;
419	bool falsepositive;
420	char fast_no_hz[72];
421	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
422	char *ticks_title;
423	unsigned long ticks_value;
424
425	/*
426	 * We could be printing a lot while holding a spinlock.  Avoid
427	 * triggering hard lockup.
428	 */
429	touch_nmi_watchdog();
430
431	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
432	if (ticks_value) {
433		ticks_title = "GPs behind";
434	} else {
435		ticks_title = "ticks this GP";
436		ticks_value = rdp->ticks_this_gp;
437	}
438	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
439	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
440	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
441			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
442	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
443	       cpu,
444	       "O."[!!cpu_online(cpu)],
445	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
446	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
447	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
448			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
449				"!."[!delta],
450	       ticks_value, ticks_title,
451	       rcu_dynticks_snap(rdp) & 0xfff,
452	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
453	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
454	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
455	       fast_no_hz,
456	       falsepositive ? " (false positive?)" : "");
457}
458
459/* Complain about starvation of grace-period kthread.  */
460static void rcu_check_gp_kthread_starvation(void)
461{
462	int cpu;
463	struct task_struct *gpk = rcu_state.gp_kthread;
464	unsigned long j;
465
466	if (rcu_is_gp_kthread_starving(&j)) {
467		cpu = gpk ? task_cpu(gpk) : -1;
468		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
469		       rcu_state.name, j,
470		       (long)rcu_seq_current(&rcu_state.gp_seq),
471		       data_race(rcu_state.gp_flags),
472		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
473		       gpk ? gpk->__state : ~0, cpu);
474		if (gpk) {
475			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
476			pr_err("RCU grace-period kthread stack dump:\n");
477			sched_show_task(gpk);
478			if (cpu >= 0) {
479				if (cpu_is_offline(cpu)) {
480					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
481				} else  {
482					pr_err("Stack dump where RCU GP kthread last ran:\n");
483					if (!trigger_single_cpu_backtrace(cpu))
484						dump_cpu_task(cpu);
485				}
486			}
487			wake_up_process(gpk);
488		}
489	}
490}
491
492/* Complain about missing wakeups from expired fqs wait timer */
493static void rcu_check_gp_kthread_expired_fqs_timer(void)
494{
495	struct task_struct *gpk = rcu_state.gp_kthread;
496	short gp_state;
497	unsigned long jiffies_fqs;
498	int cpu;
499
500	/*
501	 * Order reads of .gp_state and .jiffies_force_qs.
502	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
503	 */
504	gp_state = smp_load_acquire(&rcu_state.gp_state);
505	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);
506
507	if (gp_state == RCU_GP_WAIT_FQS &&
508	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
509	    gpk && !READ_ONCE(gpk->on_rq)) {
510		cpu = task_cpu(gpk);
511		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
512		       rcu_state.name, (jiffies - jiffies_fqs),
513		       (long)rcu_seq_current(&rcu_state.gp_seq),
514		       data_race(rcu_state.gp_flags),
515		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
516		       gpk->__state);
517		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
518		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
519	}
520}
521
522static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
523{
524	int cpu;
525	unsigned long flags;
526	unsigned long gpa;
527	unsigned long j;
528	int ndetected = 0;
529	struct rcu_node *rnp;
530	long totqlen = 0;
531
532	lockdep_assert_irqs_disabled();
533
534	/* Kick and suppress, if so configured. */
535	rcu_stall_kick_kthreads();
536	if (rcu_stall_is_suppressed())
537		return;
538
539	/*
540	 * OK, time to rat on our buddy...
541	 * See Documentation/RCU/stallwarn.rst for info on how to debug
542	 * RCU CPU stall warnings.
543	 */
544	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
545	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
546	rcu_for_each_leaf_node(rnp) {
547		raw_spin_lock_irqsave_rcu_node(rnp, flags);
548		if (rnp->qsmask != 0) {
549			for_each_leaf_node_possible_cpu(rnp, cpu)
550				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
551					print_cpu_stall_info(cpu);
552					ndetected++;
553				}
554		}
555		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
556		lockdep_assert_irqs_disabled();
557	}
558
559	for_each_possible_cpu(cpu)
560		totqlen += rcu_get_n_cbs_cpu(cpu);
561	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
562	       smp_processor_id(), (long)(jiffies - gps),
563	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
564	if (ndetected) {
565		rcu_dump_cpu_stacks();
566
567		/* Complain about tasks blocking the grace period. */
568		rcu_for_each_leaf_node(rnp)
569			rcu_print_detail_task_stall_rnp(rnp);
570	} else {
571		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
572			pr_err("INFO: Stall ended before state dump start\n");
573		} else {
574			j = jiffies;
575			gpa = data_race(rcu_state.gp_activity);
576			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
577			       rcu_state.name, j - gpa, j, gpa,
578			       data_race(jiffies_till_next_fqs),
579			       rcu_get_root()->qsmask);
580		}
581	}
582	/* Rewrite if needed in case of slow consoles. */
583	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
584		WRITE_ONCE(rcu_state.jiffies_stall,
585			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
586
587	rcu_check_gp_kthread_expired_fqs_timer();
588	rcu_check_gp_kthread_starvation();
589
590	panic_on_rcu_stall();
591
592	rcu_force_quiescent_state();  /* Kick them all. */
593}
594
595static void print_cpu_stall(unsigned long gps)
596{
597	int cpu;
598	unsigned long flags;
599	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
600	struct rcu_node *rnp = rcu_get_root();
601	long totqlen = 0;
602
603	lockdep_assert_irqs_disabled();
604
605	/* Kick and suppress, if so configured. */
606	rcu_stall_kick_kthreads();
607	if (rcu_stall_is_suppressed())
608		return;
609
610	/*
611	 * OK, time to rat on ourselves...
612	 * See Documentation/RCU/stallwarn.rst for info on how to debug
613	 * RCU CPU stall warnings.
614	 */
615	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
616	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
617	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
618	print_cpu_stall_info(smp_processor_id());
619	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
620	for_each_possible_cpu(cpu)
621		totqlen += rcu_get_n_cbs_cpu(cpu);
622	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
623		jiffies - gps,
624		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
625
626	rcu_check_gp_kthread_expired_fqs_timer();
627	rcu_check_gp_kthread_starvation();
628
629	rcu_dump_cpu_stacks();
630
631	raw_spin_lock_irqsave_rcu_node(rnp, flags);
632	/* Rewrite if needed in case of slow consoles. */
633	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
634		WRITE_ONCE(rcu_state.jiffies_stall,
635			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
636	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
637
638	panic_on_rcu_stall();
639
640	/*
641	 * Attempt to revive the RCU machinery by forcing a context switch.
642	 *
643	 * A context switch would normally allow the RCU state machine to make
644	 * progress and it could be we're stuck in kernel space without context
645	 * switches for an entirely unreasonable amount of time.
646	 */
647	set_tsk_need_resched(current);
648	set_preempt_need_resched();
649}
650
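/*
 * Check whether the current grace period has stalled and, if this CPU wins
 * the race (via the cmpxchg() of ->jiffies_stall below) to report it, print
 * either a self-detected or an other-CPUs/tasks stall warning.
 */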
651static void check_cpu_stall(struct rcu_data *rdp)
652{
653	unsigned long gs1;
654	unsigned long gs2;
655	unsigned long gps;
656	unsigned long j;
657	unsigned long jn;
658	unsigned long js;
659	struct rcu_node *rnp;
660
661	lockdep_assert_irqs_disabled();
662	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
663	    !rcu_gp_in_progress())
664		return;
665	rcu_stall_kick_kthreads();
666	j = jiffies;
667
668	/*
669	 * Lots of memory barriers to reject false positives.
670	 *
671	 * The idea is to pick up rcu_state.gp_seq, then
672	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
673	 * another copy of rcu_state.gp_seq.  These values are updated in
674	 * the opposite order with memory barriers (or equivalent) during
675	 * grace-period initialization and cleanup.  Now, a false positive
676	 * can occur if we get a new value of rcu_state.gp_start and an old
677	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
678	 * the only way that this can happen is if one grace period ends
679	 * and another starts between these two fetches.  This is detected
680	 * by comparing the second fetch of rcu_state.gp_seq with the
681	 * previous fetch from rcu_state.gp_seq.
682	 *
683	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
684	 * and rcu_state.gp_start suffice to forestall false positives.
685	 */
686	gs1 = READ_ONCE(rcu_state.gp_seq);
687	smp_rmb(); /* Pick up ->gp_seq first... */
688	js = READ_ONCE(rcu_state.jiffies_stall);
689	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
690	gps = READ_ONCE(rcu_state.gp_start);
691	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
692	gs2 = READ_ONCE(rcu_state.gp_seq);
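	/*
	 * Return if a grace-period boundary was crossed between the two
	 * ->gp_seq fetches (gs1 != gs2), if the stall deadline has not yet
	 * arrived (j precedes js), or if the deadline that was read out
	 * predates the current grace period's start and is therefore
	 * stale (gps >= js).
	 */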
693	if (gs1 != gs2 ||
694	    ULONG_CMP_LT(j, js) ||
695	    ULONG_CMP_GE(gps, js))
696		return; /* No stall or GP completed since entering function. */
697	rnp = rdp->mynode;
698	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
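	/*
	 * jn is the deadline for the next stall report.  The cmpxchg() calls
	 * below advance ->jiffies_stall from js to jn, so only the CPU that
	 * wins the exchange prints a report: the first branch handles a stall
	 * that this CPU is itself causing, the second handles stalls on other
	 * CPUs or tasks once a further RCU_STALL_RAT_DELAY has elapsed.
	 */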
699	if (rcu_gp_in_progress() &&
700	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
701	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
702
703		/*
704		 * If a virtual machine is stopped by the host it can look to
705		 * the watchdog like an RCU stall. Check to see if the host
706		 * stopped the vm.
707		 */
708		if (kvm_check_and_clear_guest_paused())
709			return;
710
711		/* We haven't checked in, so go dump stack. */
712		print_cpu_stall(gps);
713		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
714			rcu_ftrace_dump(DUMP_ALL);
715
716	} else if (rcu_gp_in_progress() &&
717		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
718		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
719
720		/*
721		 * If a virtual machine is stopped by the host it can look to
722		 * the watchdog like an RCU stall. Check to see if the host
723		 * stopped the vm.
724		 */
725		if (kvm_check_and_clear_guest_paused())
726			return;
727
728		/* They had a few time units to dump stack, so complain. */
729		print_other_cpu_stall(gs2, gps);
730		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
731			rcu_ftrace_dump(DUMP_ALL);
732	}
733}
734
735//////////////////////////////////////////////////////////////////////////////
736//
737// RCU forward-progress mechanisms, including that of callback invocation.
738
739
740/*
741 * Check to see if a failure to end RCU priority inversion was due to
742 * a CPU not passing through a quiescent state.  When this happens, there
743 * is nothing that RCU priority boosting can do to help, so we shouldn't
744 * count this as an RCU priority boosting failure.  A return of true says
745 * RCU priority boosting is to blame, and false says otherwise.  If false
746 * is returned, the first of the CPUs to blame is stored through cpup.
747 * If there was no CPU blocking the current grace period, but also nothing
748 * in need of being boosted, *cpup is set to -1.  This can happen in case
749 * in need of being boosted, *cpup is set to -1.  This can happen when,
750 * for example, a vCPU is preempted while the last CPU is reporting its
751 * quiescent state.
752 * If cpup is NULL, then a lockless quick check is carried out, suitable
753 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
754 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
755 */
756bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
757{
758	bool atb = false;
759	int cpu;
760	unsigned long flags;
761	struct rcu_node *rnp;
762
763	rcu_for_each_leaf_node(rnp) {
764		if (!cpup) {
765			if (READ_ONCE(rnp->qsmask)) {
766				return false;
767			} else {
768				if (READ_ONCE(rnp->gp_tasks))
769					atb = true;
770				continue;
771			}
772		}
773		*cpup = -1;
774		raw_spin_lock_irqsave_rcu_node(rnp, flags);
775		if (rnp->gp_tasks)
776			atb = true;
777		if (!rnp->qsmask) {
778			// No CPUs without quiescent states for this rnp.
779			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
780			continue;
781		}
782		// Find the first holdout CPU.
783		for_each_leaf_node_possible_cpu(rnp, cpu) {
784			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
785				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
786				*cpup = cpu;
787				return false;
788			}
789		}
790		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
791	}
792	// Can't blame CPUs, so must blame RCU priority boosting.
793	return atb;
794}
795EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
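/*
 * For example, a diagnostic caller (such as rcutorture) might distinguish
 * the possible outcomes along the following lines.  This is only a sketch:
 * the surrounding code and the origin of the gp_state value are assumed,
 * not taken from this file.
 *
 *	int cpu;
 *
 *	if (rcu_check_boost_fail(gp_state, &cpu))
 *		pr_info("RCU priority boosting failed to help\n");
 *	else if (cpu >= 0)
 *		pr_info("CPU %d has yet to report a quiescent state\n", cpu);
 *	else
 *		pr_info("No holdout CPU and nothing awaiting boosting\n");
 */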
796
797/*
798 * Show the state of the grace-period kthreads.
799 */
800void show_rcu_gp_kthreads(void)
801{
802	unsigned long cbs = 0;
803	int cpu;
804	unsigned long j;
805	unsigned long ja;
806	unsigned long jr;
807	unsigned long js;
808	unsigned long jw;
809	struct rcu_data *rdp;
810	struct rcu_node *rnp;
811	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
812
813	j = jiffies;
814	ja = j - data_race(rcu_state.gp_activity);
815	jr = j - data_race(rcu_state.gp_req_activity);
816	js = j - data_race(rcu_state.gp_start);
817	jw = j - data_race(rcu_state.gp_wake_time);
818	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
819		rcu_state.name, gp_state_getname(rcu_state.gp_state),
820		rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
821		js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
822		(long)data_race(rcu_state.gp_seq),
823		(long)data_race(rcu_get_root()->gp_seq_needed),
824		data_race(rcu_state.gp_max),
825		data_race(rcu_state.gp_flags));
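	/*
	 * Print only those rcu_node structures with outstanding grace-period
	 * requests, blocked tasks, or CPUs that have yet to report a
	 * quiescent state.
	 */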
826	rcu_for_each_node_breadth_first(rnp) {
827		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
828		    !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
829		    !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
830			continue;
831		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
832			rnp->grplo, rnp->grphi,
833			(long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
834			data_race(rnp->qsmask),
835			".b"[!!data_race(rnp->boost_kthread_task)],
836			".B"[!!data_race(rnp->boost_tasks)],
837			".E"[!!data_race(rnp->exp_tasks)],
838			".G"[!!data_race(rnp->gp_tasks)],
839			data_race(rnp->n_boosts));
840		if (!rcu_is_leaf_node(rnp))
841			continue;
842		for_each_leaf_node_possible_cpu(rnp, cpu) {
843			rdp = per_cpu_ptr(&rcu_data, cpu);
844			if (READ_ONCE(rdp->gpwrap) ||
845			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
846					 READ_ONCE(rdp->gp_seq_needed)))
847				continue;
848			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
849				cpu, (long)data_race(rdp->gp_seq_needed));
850		}
851	}
852	for_each_possible_cpu(cpu) {
853		rdp = per_cpu_ptr(&rcu_data, cpu);
854		cbs += data_race(rdp->n_cbs_invoked);
855		if (rcu_segcblist_is_offloaded(&rdp->cblist))
856			show_rcu_nocb_state(rdp);
857	}
858	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
859	show_rcu_tasks_gp_kthreads();
860}
861EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
862
863/*
864 * This function checks for grace-period requests that fail to motivate
865 * RCU to come out of its idle mode.
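 *
 * The checks are performed three times: locklessly, then under the leaf
 * rcu_node structure's ->lock, and finally also under the root rcu_node
 * structure's ->lock, so that a grace period starting (or the requested
 * activity occurring) while the locks are being acquired does not produce
 * a false warning.  The atomic "warned" flag limits the WARN_ON() splat to
 * a single report.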
866 */
867static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
868				     const unsigned long gpssdelay)
869{
870	unsigned long flags;
871	unsigned long j;
872	struct rcu_node *rnp_root = rcu_get_root();
873	static atomic_t warned = ATOMIC_INIT(0);
874
875	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
876	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
877			 READ_ONCE(rnp_root->gp_seq_needed)) ||
878	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
879		return;
880	j = jiffies; /* Expensive access, and in common case don't get here. */
881	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
882	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
883	    atomic_read(&warned))
884		return;
885
886	raw_spin_lock_irqsave_rcu_node(rnp, flags);
887	j = jiffies;
888	if (rcu_gp_in_progress() ||
889	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
890			 READ_ONCE(rnp_root->gp_seq_needed)) ||
891	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
892	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
893	    atomic_read(&warned)) {
894		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
895		return;
896	}
897	/* Hold onto the leaf lock to make others see warned==1. */
898
899	if (rnp_root != rnp)
900		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
901	j = jiffies;
902	if (rcu_gp_in_progress() ||
903	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
904			 READ_ONCE(rnp_root->gp_seq_needed)) ||
905	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
906	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
907	    atomic_xchg(&warned, 1)) {
908		if (rnp_root != rnp)
909			/* irqs remain disabled. */
910			raw_spin_unlock_rcu_node(rnp_root);
911		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
912		return;
913	}
914	WARN_ON(1);
915	if (rnp_root != rnp)
916		raw_spin_unlock_rcu_node(rnp_root);
917	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
918	show_rcu_gp_kthreads();
919}
920
921/*
922 * Do a forward-progress check for rcutorture.  This is normally invoked
923 * due to an OOM event.  The argument "j" gives the time period during
924 * which rcutorture would like progress to have been made.
925 */
926void rcu_fwd_progress_check(unsigned long j)
927{
928	unsigned long cbs;
929	int cpu;
930	unsigned long max_cbs = 0;
931	int max_cpu = -1;
932	struct rcu_data *rdp;
933
934	if (rcu_gp_in_progress()) {
935		pr_info("%s: GP age %lu jiffies\n",
936			__func__, jiffies - rcu_state.gp_start);
937		show_rcu_gp_kthreads();
938	} else {
939		pr_info("%s: Last GP end %lu jiffies ago\n",
940			__func__, jiffies - rcu_state.gp_end);
941		preempt_disable();
942		rdp = this_cpu_ptr(&rcu_data);
943		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
944		preempt_enable();
945	}
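	/*
	 * Print the number of callbacks queued on each CPU that has any,
	 * noting which CPU has the most.
	 */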
946	for_each_possible_cpu(cpu) {
947		cbs = rcu_get_n_cbs_cpu(cpu);
948		if (!cbs)
949			continue;
950		if (max_cpu < 0)
951			pr_info("%s: callbacks", __func__);
952		pr_cont(" %d: %lu", cpu, cbs);
953		if (cbs <= max_cbs)
954			continue;
955		max_cbs = cbs;
956		max_cpu = cpu;
957	}
958	if (max_cpu >= 0)
959		pr_cont("\n");
960}
961EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
962
963/* Commandeer a sysrq key to dump RCU's tree. */
964static bool sysrq_rcu;
965module_param(sysrq_rcu, bool, 0444);
966
967/* Dump grace-period-request information due to commandeered sysrq. */
968static void sysrq_show_rcu(int key)
969{
970	show_rcu_gp_kthreads();
971}
972
973static const struct sysrq_key_op sysrq_rcudump_op = {
974	.handler = sysrq_show_rcu,
975	.help_msg = "show-rcu(y)",
976	.action_msg = "Show RCU tree",
977	.enable_mask = SYSRQ_ENABLE_DUMP,
978};
979
980static int __init rcu_sysrq_init(void)
981{
982	if (sysrq_rcu)
983		return register_sysrq_key('y', &sysrq_rcudump_op);
984	return 0;
985}
986early_initcall(rcu_sysrq_init);
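/*
 * Booting with this parameter set (likely "rcutree.sysrq_rcu=1" when RCU is
 * built in, though the exact prefix depends on how this file is compiled
 * into the kernel) registers the handler above, after which something like
 * "echo y > /proc/sysrq-trigger" dumps the RCU tree via
 * show_rcu_gp_kthreads().
 */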