   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * RCU CPU stall warnings for normal RCU grace periods
   4 *
   5 * Copyright IBM Corporation, 2019
   6 *
   7 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
   8 */
   9
  10#include <linux/console.h>
  11#include <linux/kvm_para.h>
  12#include <linux/rcu_notifier.h>
  13#include <linux/smp.h>
  14
  15//////////////////////////////////////////////////////////////////////////////
  16//
  17// Controlling CPU stall warnings, including delay calculation.
  18
  19/* panic() on RCU Stall sysctl. */
  20int sysctl_panic_on_rcu_stall __read_mostly;
  21int sysctl_max_rcu_stall_to_panic __read_mostly;
  22
  23#ifdef CONFIG_PROVE_RCU
  24#define RCU_STALL_DELAY_DELTA		(5 * HZ)
  25#else
  26#define RCU_STALL_DELAY_DELTA		0
  27#endif
  28#define RCU_STALL_MIGHT_DIV		8
  29#define RCU_STALL_MIGHT_MIN		(2 * HZ)
  30
  31int rcu_exp_jiffies_till_stall_check(void)
  32{
  33	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
  34	int exp_stall_delay_delta = 0;
  35	int till_stall_check;
  36
  37	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
  38	if (!cpu_stall_timeout)
  39		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());
  40
  41	// Limit check must be consistent with the Kconfig limits for
  42	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
  43	// The minimum clamped value is "2UL", because at least one full
  44	// tick has to be guaranteed.
  45	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 300UL * HZ);
  46
  47	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
  48		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));
  49
  50#ifdef CONFIG_PROVE_RCU
  51	/* Add an extra ~25% of till_stall_check. */
  52	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
  53#endif
  54
  55	return till_stall_check + exp_stall_delay_delta;
  56}
  57EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
  58
  59/* Limit-check stall timeouts specified at boottime and runtime. */
  60int rcu_jiffies_till_stall_check(void)
  61{
  62	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
  63
  64	/*
  65	 * Limit check must be consistent with the Kconfig limits
  66	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
  67	 */
  68	if (till_stall_check < 3) {
  69		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
  70		till_stall_check = 3;
  71	} else if (till_stall_check > 300) {
  72		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
  73		till_stall_check = 300;
  74	}
  75	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
  76}
  77EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
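As a standalone illustration of the clamping above (a hypothetical userspace sketch, not kernel code; HZ is assumed to be 1000 and RCU_STALL_DELAY_DELTA is ignored), the 3..300 second clamp behaves as follows:

#include <stdio.h>

#define HZ 1000				/* assumed tick rate for this sketch */

/* Mirror of the clamp in rcu_jiffies_till_stall_check(). */
static int stall_timeout_jiffies(int timeout_s)
{
	if (timeout_s < 3)
		timeout_s = 3;
	else if (timeout_s > 300)
		timeout_s = 300;
	return timeout_s * HZ;
}

int main(void)
{
	printf("%d\n", stall_timeout_jiffies(0));	/* 3000: clamped up to 3 seconds */
	printf("%d\n", stall_timeout_jiffies(21));	/* 21000: typical 21-second default */
	printf("%d\n", stall_timeout_jiffies(500));	/* 300000: clamped down to 300 seconds */
	return 0;
}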
  78
  79/* Don't do RCU CPU stall warnings during long sysrq printouts. */
  80void rcu_sysrq_start(void)
  81{
  82	if (!rcu_cpu_stall_suppress)
  83		rcu_cpu_stall_suppress = 2;
  84}
  85
  86void rcu_sysrq_end(void)
  87{
  88	if (rcu_cpu_stall_suppress == 2)
  89		rcu_cpu_stall_suppress = 0;
  90}
  91
  92/* Don't print RCU CPU stall warnings during a kernel panic. */
  93static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
  94{
  95	rcu_cpu_stall_suppress = 1;
  96	return NOTIFY_DONE;
  97}
  98
  99static struct notifier_block rcu_panic_block = {
 100	.notifier_call = rcu_panic,
 101};
 102
 103static int __init check_cpu_stall_init(void)
 104{
 105	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
 106	return 0;
 107}
 108early_initcall(check_cpu_stall_init);
 109
 110/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
 111static void panic_on_rcu_stall(void)
 112{
 113	static int cpu_stall;
 114
 115	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
 116		return;
 117
 118	if (sysctl_panic_on_rcu_stall)
 119		panic("RCU Stall\n");
 120}
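A worked illustration of the counting above (a hypothetical standalone sketch, not kernel code): with sysctl_max_rcu_stall_to_panic set to 3 and sysctl_panic_on_rcu_stall enabled, the first two stall warnings only print and the third triggers the panic.

#include <stdio.h>

static int sysctl_panic_on_rcu_stall = 1;	/* example settings */
static int sysctl_max_rcu_stall_to_panic = 3;

/* Standalone mirror of panic_on_rcu_stall(): returns 1 when a panic would fire. */
static int would_panic(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return 0;
	return sysctl_panic_on_rcu_stall;
}

int main(void)
{
	for (int i = 1; i <= 3; i++)
		printf("stall %d: %s\n", i, would_panic() ? "panic" : "warn only");
	return 0;
}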
 121
 122/**
 123 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 124 *
 125 * To perform the reset request from the caller, disable stall detection until
 126 * 3 fqs loops have passed. This is required to ensure a fresh jiffies value
 127 * is loaded.  It should be safe to do from the fqs loop as enough timer
 128 * interrupts and context switches should have passed.
 129 *
 130 * The caller must disable hard irqs.
 131 */
 132void rcu_cpu_stall_reset(void)
 133{
 134	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
 135	WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
 136}
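A minimal sketch of a caller (hypothetical function name; the only stated requirement is that hard irqs be disabled around the call):

/* Hypothetical caller that restarts the stall timeout across a long quiet spell. */
static void example_restart_stall_timeout(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* rcu_cpu_stall_reset() requires hard irqs off */
	rcu_cpu_stall_reset();
	local_irq_restore(flags);
}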
 137
 138//////////////////////////////////////////////////////////////////////////////
 139//
 140// Interaction with RCU grace periods
 141
 142/* Start of new grace period, so record stall time (and forcing times). */
 143static void record_gp_stall_check_time(void)
 144{
 145	unsigned long j = jiffies;
 146	unsigned long j1;
 147
 148	WRITE_ONCE(rcu_state.gp_start, j);
 149	j1 = rcu_jiffies_till_stall_check();
 150	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
 151	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
 152	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
 153	rcu_state.jiffies_resched = j + j1 / 2;
 154	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
 155}
 156
 157/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
 158static void zero_cpu_stall_ticks(struct rcu_data *rdp)
 159{
 160	rdp->ticks_this_gp = 0;
 161	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
 162	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
 163}
 164
 165/*
 166 * If too much time has passed in the current grace period, and if
 167 * so configured, go kick the relevant kthreads.
 168 */
 169static void rcu_stall_kick_kthreads(void)
 170{
 171	unsigned long j;
 172
 173	if (!READ_ONCE(rcu_kick_kthreads))
 174		return;
 175	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
 176	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
 177	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
 178		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
 179			  rcu_state.name);
 180		rcu_ftrace_dump(DUMP_ALL);
 181		wake_up_process(rcu_state.gp_kthread);
 182		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
 183	}
 184}
 185
 186/*
 187 * Handler for the irq_work request posted about halfway into the RCU CPU
 188 * stall timeout, and used to detect excessive irq disabling.  Set state
 189 * appropriately, but just complain if there is unexpected state on entry.
 190 */
 191static void rcu_iw_handler(struct irq_work *iwp)
 192{
 193	struct rcu_data *rdp;
 194	struct rcu_node *rnp;
 195
 196	rdp = container_of(iwp, struct rcu_data, rcu_iw);
 197	rnp = rdp->mynode;
 198	raw_spin_lock_rcu_node(rnp);
 199	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
 200		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 201		rdp->rcu_iw_pending = false;
 202	}
 203	raw_spin_unlock_rcu_node(rnp);
 204}
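For context, a hedged sketch of how such a handler is typically wired up with the irq_work API (hypothetical helper; the real initialization and posting happen elsewhere in the RCU core, and the field names follow the rcu_data usage above):

/* Hypothetical setup and posting of the stall-detection irq_work. */
static void example_post_rcu_iw(struct rcu_data *rdp, int cpu)
{
	/* One-time binding of the handler above to the work item. */
	init_irq_work(&rdp->rcu_iw, rcu_iw_handler);

	/* Post it to the possibly stalled CPU; the handler runs from hard-irq context. */
	rdp->rcu_iw_pending = true;
	irq_work_queue_on(&rdp->rcu_iw, cpu);
}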
 205
 206//////////////////////////////////////////////////////////////////////////////
 207//
 208// Printing RCU CPU stall warnings
 209
 210#ifdef CONFIG_PREEMPT_RCU
 211
 212/*
 213 * Dump detailed information for all tasks blocking the current RCU
 214 * grace period on the specified rcu_node structure.
 215 */
 216static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 217{
 218	unsigned long flags;
 219	struct task_struct *t;
 220
 221	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 222	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 223		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 224		return;
 225	}
 226	t = list_entry(rnp->gp_tasks->prev,
 227		       struct task_struct, rcu_node_entry);
 228	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 229		/*
 230		 * We could be printing a lot while holding a spinlock.
 231		 * Avoid triggering hard lockup.
 232		 */
 233		touch_nmi_watchdog();
 234		sched_show_task(t);
 235	}
 236	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 237}
 238
 239// Communicate task state back to the RCU CPU stall warning request.
 240struct rcu_stall_chk_rdr {
 241	int nesting;
 242	union rcu_special rs;
 243	bool on_blkd_list;
 244};
 245
 246/*
 247 * Report out the state of a not-running task that is stalling the
 248 * current RCU grace period.
 249 */
 250static int check_slow_task(struct task_struct *t, void *arg)
 251{
 252	struct rcu_stall_chk_rdr *rscrp = arg;
 253
 254	if (task_curr(t))
 255		return -EBUSY; // It is running, so decline to inspect it.
 256	rscrp->nesting = t->rcu_read_lock_nesting;
 257	rscrp->rs = t->rcu_read_unlock_special;
 258	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
 259	return 0;
 260}
 261
 262/*
 263 * Scan the current list of tasks blocked within RCU read-side critical
 264 * sections, printing out the tid of each of the first few of them.
 265 */
 266static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 267	__releases(rnp->lock)
 268{
 269	int i = 0;
 270	int ndetected = 0;
 271	struct rcu_stall_chk_rdr rscr;
 272	struct task_struct *t;
 273	struct task_struct *ts[8];
 274
 275	lockdep_assert_irqs_disabled();
 276	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 277		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 278		return 0;
 279	}
 280	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
 281	       rnp->level, rnp->grplo, rnp->grphi);
 282	t = list_entry(rnp->gp_tasks->prev,
 283		       struct task_struct, rcu_node_entry);
 284	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 285		get_task_struct(t);
 286		ts[i++] = t;
 287		if (i >= ARRAY_SIZE(ts))
 288			break;
 289	}
 290	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 291	while (i) {
 292		t = ts[--i];
 293		if (task_call_func(t, check_slow_task, &rscr))
 294			pr_cont(" P%d", t->pid);
 295		else
 296			pr_cont(" P%d/%d:%c%c%c%c",
 297				t->pid, rscr.nesting,
 298				".b"[rscr.rs.b.blocked],
 299				".q"[rscr.rs.b.need_qs],
 300				".e"[rscr.rs.b.exp_hint],
 301				".l"[rscr.on_blkd_list]);
 302		lockdep_assert_irqs_disabled();
 303		put_task_struct(t);
 304		ndetected++;
 305	}
 306	pr_cont("\n");
 307	return ndetected;
 308}
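The per-task state characters above rely on the ".b"[flag] idiom: indexing a two-character string literal so that a clear flag prints '.' and a set flag prints its letter. A standalone sketch (hypothetical flag values, not kernel code):

#include <stdio.h>

int main(void)
{
	int blocked = 1, need_qs = 0, exp_hint = 0, on_blkd_list = 1;

	/* Prints "b..l": blocked and still on the ->blkd_tasks list, nothing else set. */
	printf("%c%c%c%c\n", ".b"[blocked], ".q"[need_qs],
	       ".e"[exp_hint], ".l"[on_blkd_list]);
	return 0;
}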
 309
 310#else /* #ifdef CONFIG_PREEMPT_RCU */
 311
 312/*
 313 * Because preemptible RCU does not exist, we never have to check for
 314 * tasks blocked within RCU read-side critical sections.
 315 */
 316static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 317{
 318}
 319
 320/*
 321 * Because preemptible RCU does not exist, we never have to check for
 322 * tasks blocked within RCU read-side critical sections.
 323 */
 324static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 325	__releases(rnp->lock)
 326{
 327	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 328	return 0;
 329}
 330#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 331
 332/*
 333 * Dump stacks of all tasks running on stalled CPUs.  First try using
 334 * NMIs, but fall back to manual remote stack tracing on architectures
 335 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 336 * traces are more accurate because they are printed by the target CPU.
 337 */
 338static void rcu_dump_cpu_stacks(unsigned long gp_seq)
 339{
 340	int cpu;
 341	unsigned long flags;
 342	struct rcu_node *rnp;
 343
 344	rcu_for_each_leaf_node(rnp) {
 345		printk_deferred_enter();
 346		for_each_leaf_node_possible_cpu(rnp, cpu) {
 347			if (gp_seq != data_race(rcu_state.gp_seq)) {
 348				printk_deferred_exit();
 349				pr_err("INFO: Stall ended during stack backtracing.\n");
 350				return;
 351			}
 352			if (!(data_race(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu)))
 353				continue;
 354			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 355			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 356				if (cpu_is_offline(cpu))
 357					pr_err("Offline CPU %d blocking current GP.\n", cpu);
 358				else
 359					dump_cpu_task(cpu);
 360			}
 361			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 362		}
 363		printk_deferred_exit();
 364	}
 365}
 366
 367static const char * const gp_state_names[] = {
 368	[RCU_GP_IDLE] = "RCU_GP_IDLE",
 369	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
 370	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
 371	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
 372	[RCU_GP_INIT] = "RCU_GP_INIT",
 373	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
 374	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
 375	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
 376	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
 377};
 378
 379/*
 380 * Convert a ->gp_state value to a character string.
 381 */
 382static const char *gp_state_getname(short gs)
 383{
 384	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
 385		return "???";
 386	return gp_state_names[gs];
 387}
 388
 389/* Is the RCU grace-period kthread being starved of CPU time? */
 390static bool rcu_is_gp_kthread_starving(unsigned long *jp)
 391{
 392	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);
 393
 394	if (jp)
 395		*jp = j;
 396	return j > 2 * HZ;
 397}
 398
 399static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
 400{
 401	int cpu;
 402	struct task_struct *rcuc;
 403	unsigned long j;
 404
 405	rcuc = rdp->rcu_cpu_kthread_task;
 406	if (!rcuc)
 407		return false;
 408
 409	cpu = task_cpu(rcuc);
 410	if (cpu_is_offline(cpu) || idle_cpu(cpu))
 411		return false;
 412
 413	j = jiffies - READ_ONCE(rdp->rcuc_activity);
 414
 415	if (jp)
 416		*jp = j;
 417	return j > 2 * HZ;
 418}
 419
 420static void print_cpu_stat_info(int cpu)
 421{
 422	struct rcu_snap_record rsr, *rsrp;
 423	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 424	struct kernel_cpustat *kcsp = &kcpustat_cpu(cpu);
 425
 426	if (!rcu_cpu_stall_cputime)
 427		return;
 428
 429	rsrp = &rdp->snap_record;
 430	if (rsrp->gp_seq != rdp->gp_seq)
 431		return;
 432
 433	rsr.cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
 434	rsr.cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
 435	rsr.cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
 436
 437	pr_err("\t         hardirqs   softirqs   csw/system\n");
 438	pr_err("\t number: %8ld %10d %12lld\n",
 439		kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs,
 440		kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
 441		nr_context_switches_cpu(cpu) - rsrp->nr_csw);
 442	pr_err("\tcputime: %8lld %10lld %12lld   ==> %d(ms)\n",
 443		div_u64(rsr.cputime_irq - rsrp->cputime_irq, NSEC_PER_MSEC),
 444		div_u64(rsr.cputime_softirq - rsrp->cputime_softirq, NSEC_PER_MSEC),
 445		div_u64(rsr.cputime_system - rsrp->cputime_system, NSEC_PER_MSEC),
 446		jiffies_to_msecs(jiffies - rsrp->jiffies));
 447}
 448
 449/*
 450 * Print out diagnostic information for the specified stalled CPU.
 451 *
 452 * If the specified CPU is aware of the current RCU grace period, then
 453 * print the number of scheduling clock interrupts the CPU has taken
 454 * during the time that it has been aware.  Otherwise, print the number
 455 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 456 * if the CPU was aware of the previous grace period.
 457 *
 458 * Also print out idle info.
 459 */
 460static void print_cpu_stall_info(int cpu)
 461{
 462	unsigned long delta;
 463	bool falsepositive;
 464	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 465	char *ticks_title;
 466	unsigned long ticks_value;
 467	bool rcuc_starved;
 468	unsigned long j;
 469	char buf[32];
 470
 471	/*
 472	 * We could be printing a lot while holding a spinlock.  Avoid
 473	 * triggering hard lockup.
 474	 */
 475	touch_nmi_watchdog();
 476
 477	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
 478	if (ticks_value) {
 479		ticks_title = "GPs behind";
 480	} else {
 481		ticks_title = "ticks this GP";
 482		ticks_value = rdp->ticks_this_gp;
 483	}
 484	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
 485	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
 486			rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu));
 487	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
 488	if (rcuc_starved)
 489		// Print signed value, as negative values indicate a probable bug.
 490		snprintf(buf, sizeof(buf), " rcuc=%ld jiffies(starved)", j);
 491	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
 492	       cpu,
 493	       "O."[!!cpu_online(cpu)],
 494	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
 495	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
 496	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
 497			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
 498				"!."[!delta],
 499	       ticks_value, ticks_title,
 500	       ct_rcu_watching_cpu(cpu) & 0xffff,
 501	       ct_nesting_cpu(cpu), ct_nmi_nesting_cpu(cpu),
 502	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 503	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
 504	       rcuc_starved ? buf : "",
 505	       falsepositive ? " (false positive?)" : "");
 506
 507	print_cpu_stat_info(cpu);
 508}
 509
 510/* Complain about starvation of grace-period kthread.  */
 511static void rcu_check_gp_kthread_starvation(void)
 512{
 513	int cpu;
 514	struct task_struct *gpk = rcu_state.gp_kthread;
 515	unsigned long j;
 516
 517	if (rcu_is_gp_kthread_starving(&j)) {
 518		cpu = gpk ? task_cpu(gpk) : -1;
 519		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
 520		       rcu_state.name, j,
 521		       (long)rcu_seq_current(&rcu_state.gp_seq),
 522		       data_race(READ_ONCE(rcu_state.gp_flags)),
 523		       gp_state_getname(rcu_state.gp_state),
 524		       data_race(READ_ONCE(rcu_state.gp_state)),
 525		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
 526		if (gpk) {
 527			struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 528
 529			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
 530			pr_err("RCU grace-period kthread stack dump:\n");
 531			sched_show_task(gpk);
 532			if (cpu_is_offline(cpu)) {
 533				pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
 534			} else if (!(data_race(READ_ONCE(rdp->mynode->qsmask)) & rdp->grpmask)) {
 535				pr_err("Stack dump where RCU GP kthread last ran:\n");
 536				dump_cpu_task(cpu);
 537			}
 538			wake_up_process(gpk);
 539		}
 540	}
 541}
 542
 543/* Complain about missing wakeups from expired fqs wait timer */
 544static void rcu_check_gp_kthread_expired_fqs_timer(void)
 545{
 546	struct task_struct *gpk = rcu_state.gp_kthread;
 547	short gp_state;
 548	unsigned long jiffies_fqs;
 549	int cpu;
 550
 551	/*
 552	 * Order reads of .gp_state and .jiffies_force_qs.
 553	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
 554	 */
 555	gp_state = smp_load_acquire(&rcu_state.gp_state);
 556	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);
 557
 558	if (gp_state == RCU_GP_WAIT_FQS &&
 559	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
 560	    gpk && !READ_ONCE(gpk->on_rq)) {
 561		cpu = task_cpu(gpk);
 562		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
 563		       rcu_state.name, (jiffies - jiffies_fqs),
 564		       (long)rcu_seq_current(&rcu_state.gp_seq),
 565		       data_race(READ_ONCE(rcu_state.gp_flags)), // Diagnostic read
 566		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
 567		       data_race(READ_ONCE(gpk->__state)));
 568		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
 569		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
 570	}
 571}
 572
 573static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 574{
 575	int cpu;
 576	unsigned long flags;
 577	unsigned long gpa;
 578	unsigned long j;
 579	int ndetected = 0;
 580	struct rcu_node *rnp;
 581	long totqlen = 0;
 582
 583	lockdep_assert_irqs_disabled();
 584
 585	/* Kick and suppress, if so configured. */
 586	rcu_stall_kick_kthreads();
 587	if (rcu_stall_is_suppressed())
 588		return;
 589
 590	nbcon_cpu_emergency_enter();
 591
 592	/*
 593	 * OK, time to rat on our buddy...
 594	 * See Documentation/RCU/stallwarn.rst for info on how to debug
 595	 * RCU CPU stall warnings.
 596	 */
 597	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
 598	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
 599	rcu_for_each_leaf_node(rnp) {
 600		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 601		if (rnp->qsmask != 0) {
 602			for_each_leaf_node_possible_cpu(rnp, cpu)
 603				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 604					print_cpu_stall_info(cpu);
 605					ndetected++;
 606				}
 607		}
 608		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
 609		lockdep_assert_irqs_disabled();
 610	}
 611
 612	for_each_possible_cpu(cpu)
 613		totqlen += rcu_get_n_cbs_cpu(cpu);
 614	pr_err("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
 615	       smp_processor_id(), (long)(jiffies - gps),
 616	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen,
 617	       data_race(rcu_state.n_online_cpus)); // Diagnostic read
 618	if (ndetected) {
 619		rcu_dump_cpu_stacks(gp_seq);
 620
 621		/* Complain about tasks blocking the grace period. */
 622		rcu_for_each_leaf_node(rnp)
 623			rcu_print_detail_task_stall_rnp(rnp);
 624	} else {
 625		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
 626			pr_err("INFO: Stall ended before state dump start\n");
 627		} else {
 628			j = jiffies;
 629			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
 630			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 631			       rcu_state.name, j - gpa, j, gpa,
 632			       data_race(READ_ONCE(jiffies_till_next_fqs)),
 633			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
 634		}
 635	}
 636	/* Rewrite if needed in case of slow consoles. */
 637	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
 638		WRITE_ONCE(rcu_state.jiffies_stall,
 639			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 640
 641	rcu_check_gp_kthread_expired_fqs_timer();
 642	rcu_check_gp_kthread_starvation();
 643
 644	nbcon_cpu_emergency_exit();
 645
 646	panic_on_rcu_stall();
 647
 648	rcu_force_quiescent_state();  /* Kick them all. */
 649}
 650
 651static void print_cpu_stall(unsigned long gp_seq, unsigned long gps)
 652{
 653	int cpu;
 654	unsigned long flags;
 655	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 656	struct rcu_node *rnp = rcu_get_root();
 657	long totqlen = 0;
 658
 659	lockdep_assert_irqs_disabled();
 660
 661	/* Kick and suppress, if so configured. */
 662	rcu_stall_kick_kthreads();
 663	if (rcu_stall_is_suppressed())
 664		return;
 665
 666	nbcon_cpu_emergency_enter();
 667
 668	/*
 669	 * OK, time to rat on ourselves...
 670	 * See Documentation/RCU/stallwarn.rst for info on how to debug
 671	 * RCU CPU stall warnings.
 672	 */
 673	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
 674	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
 675	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
 676	print_cpu_stall_info(smp_processor_id());
 677	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 678	for_each_possible_cpu(cpu)
 679		totqlen += rcu_get_n_cbs_cpu(cpu);
 680	pr_err("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
 681		jiffies - gps,
 682		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen,
 683		data_race(rcu_state.n_online_cpus)); // Diagnostic read
 684
 685	rcu_check_gp_kthread_expired_fqs_timer();
 686	rcu_check_gp_kthread_starvation();
 687
 688	rcu_dump_cpu_stacks(gp_seq);
 689
 690	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 691	/* Rewrite if needed in case of slow consoles. */
 692	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
 693		WRITE_ONCE(rcu_state.jiffies_stall,
 694			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 695	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 696
 697	nbcon_cpu_emergency_exit();
 698
 699	panic_on_rcu_stall();
 700
 701	/*
 702	 * Attempt to revive the RCU machinery by forcing a context switch.
 703	 *
 704	 * A context switch would normally allow the RCU state machine to make
 705	 * progress and it could be we're stuck in kernel space without context
 706	 * switches for an entirely unreasonable amount of time.
 707	 */
 708	set_tsk_need_resched(current);
 709	set_preempt_need_resched();
 710}
 711
 712static bool csd_lock_suppress_rcu_stall;
 713module_param(csd_lock_suppress_rcu_stall, bool, 0644);
 714
 715static void check_cpu_stall(struct rcu_data *rdp)
 716{
 717	bool self_detected;
 718	unsigned long gs1;
 719	unsigned long gs2;
 720	unsigned long gps;
 721	unsigned long j;
 722	unsigned long jn;
 723	unsigned long js;
 724	struct rcu_node *rnp;
 725
 726	lockdep_assert_irqs_disabled();
 727	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
 728	    !rcu_gp_in_progress())
 729		return;
 730	rcu_stall_kick_kthreads();
 731
 732	/*
 733	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
 734	 * loop has to set jiffies to ensure a non-stale jiffies value. This
 735	 * is required to have a good jiffies value after coming out of long
 736	 * breaks of jiffies updates. Not doing so can cause false positives.
 737	 */
 738	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
 739		return;
 740
 741	j = jiffies;
 742
 743	/*
 744	 * Lots of memory barriers to reject false positives.
 745	 *
 746	 * The idea is to pick up rcu_state.gp_seq, then
 747	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
 748	 * another copy of rcu_state.gp_seq.  These values are updated in
 749	 * the opposite order with memory barriers (or equivalent) during
 750	 * grace-period initialization and cleanup.  Now, a false positive
 751	 * can occur if we get a new value of rcu_state.gp_start and an old
 752	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
 753	 * the only way that this can happen is if one grace period ends
 754	 * and another starts between these two fetches.  This is detected
 755	 * by comparing the second fetch of rcu_state.gp_seq with the
 756	 * previous fetch from rcu_state.gp_seq.
 757	 *
 758	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
 759	 * and rcu_state.gp_start suffice to forestall false positives.
 760	 */
 761	gs1 = READ_ONCE(rcu_state.gp_seq);
 762	smp_rmb(); /* Pick up ->gp_seq first... */
 763	js = READ_ONCE(rcu_state.jiffies_stall);
 764	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
 765	gps = READ_ONCE(rcu_state.gp_start);
 766	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
 767	gs2 = READ_ONCE(rcu_state.gp_seq);
 768	if (gs1 != gs2 ||
 769	    ULONG_CMP_LT(j, js) ||
 770	    ULONG_CMP_GE(gps, js) ||
 771	    !rcu_seq_state(gs2))
 772		return; /* No stall or GP completed since entering function. */
 773	rnp = rdp->mynode;
 774	jn = jiffies + ULONG_MAX / 2;
 775	self_detected = READ_ONCE(rnp->qsmask) & rdp->grpmask;
 776	if (rcu_gp_in_progress() &&
 777	    (self_detected || ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) &&
 778	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 779		/*
 780		 * If a virtual machine is stopped by the host it can look to
 781		 * the watchdog like an RCU stall. Check to see if the host
 782		 * stopped the vm.
 783		 */
 784		if (kvm_check_and_clear_guest_paused())
 785			return;
 786
 787		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_NORM, (void *)j - gps);
 788		if (READ_ONCE(csd_lock_suppress_rcu_stall) && csd_lock_is_stuck()) {
 789			pr_err("INFO: %s detected stall, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
 790		} else if (self_detected) {
 791			/* We haven't checked in, so go dump stack. */
 792			print_cpu_stall(gs2, gps);
 793		} else {
 794			/* They had a few time units to dump stack, so complain. */
 795			print_other_cpu_stall(gs2, gps);
 796		}
 797
 798		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
 799			rcu_ftrace_dump(DUMP_ALL);
 800
 801		if (READ_ONCE(rcu_state.jiffies_stall) == jn) {
 802			jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 803			WRITE_ONCE(rcu_state.jiffies_stall, jn);
 804		}
 805	}
 806}
 807
 808//////////////////////////////////////////////////////////////////////////////
 809//
 810// RCU forward-progress mechanisms, including for callback invocation.
 811
 812
 813/*
 814 * Check to see if a failure to end RCU priority inversion was due to
 815 * a CPU not passing through a quiescent state.  When this happens, there
 816 * is nothing that RCU priority boosting can do to help, so we shouldn't
 817 * count this as an RCU priority boosting failure.  A return of true says
 818 * RCU priority boosting is to blame, and false says otherwise.  If false
 819 * is returned, the first of the CPUs to blame is stored through cpup.
 820 * If there was no CPU blocking the current grace period, but also nothing
 821 * in need of being boosted, *cpup is set to -1.  This can happen in case
 822 * of vCPU preemption while the last CPU is reporting its quiescent state,
 823 * for example.
 824 *
 825 * If cpup is NULL, then a lockless quick check is carried out, suitable
 826 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 827 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 828 */
 829bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
 830{
 831	bool atb = false;
 832	int cpu;
 833	unsigned long flags;
 834	struct rcu_node *rnp;
 835
 836	rcu_for_each_leaf_node(rnp) {
 837		if (!cpup) {
 838			if (data_race(READ_ONCE(rnp->qsmask))) {
 839				return false;
 840			} else {
 841				if (READ_ONCE(rnp->gp_tasks))
 842					atb = true;
 843				continue;
 844			}
 845		}
 846		*cpup = -1;
 847		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 848		if (rnp->gp_tasks)
 849			atb = true;
 850		if (!rnp->qsmask) {
 851			// No CPUs without quiescent states for this rnp.
 852			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 853			continue;
 854		}
 855		// Find the first holdout CPU.
 856		for_each_leaf_node_possible_cpu(rnp, cpu) {
 857			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
 858				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 859				*cpup = cpu;
 860				return false;
 861			}
 862		}
 863		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 864	}
 865	// Can't blame CPUs, so must blame RCU priority boosting.
 866	return atb;
 867}
 868EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
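A hedged sketch of a diagnostic caller (hypothetical function; interpretation of the return value and *cpup follows the header comment above):

/* Hypothetical rcutorture-style consumer of rcu_check_boost_fail(). */
static void example_report_boost_failure(unsigned long gp_state)
{
	int cpu;

	if (rcu_check_boost_fail(gp_state, &cpu))
		pr_info("RCU priority boosting is to blame\n");
	else if (cpu == -1)
		pr_info("No CPU blocking the grace period and nothing to boost\n");
	else
		pr_info("CPU %d has not yet passed through a quiescent state\n", cpu);
}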
 869
 870/*
 871 * Show the state of the grace-period kthreads.
 872 */
 873void show_rcu_gp_kthreads(void)
 874{
 875	unsigned long cbs = 0;
 876	int cpu;
 877	unsigned long j;
 878	unsigned long ja;
 879	unsigned long jr;
 880	unsigned long js;
 881	unsigned long jw;
 882	struct rcu_data *rdp;
 883	struct rcu_node *rnp;
 884	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 885
 886	j = jiffies;
 887	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
 888	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
 889	js = j - data_race(READ_ONCE(rcu_state.gp_start));
 890	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
 891	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
 892		rcu_state.name, gp_state_getname(rcu_state.gp_state),
 893		data_race(READ_ONCE(rcu_state.gp_state)),
 894		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
 895		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
 896		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
 897		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
 898		data_race(READ_ONCE(rcu_state.gp_max)),
 899		data_race(READ_ONCE(rcu_state.gp_flags)));
 900	rcu_for_each_node_breadth_first(rnp) {
 901		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
 902		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
 903		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
 904			continue;
 905		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
 906			rnp->grplo, rnp->grphi,
 907			(long)data_race(READ_ONCE(rnp->gp_seq)),
 908			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
 909			data_race(READ_ONCE(rnp->qsmask)),
 910			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
 911			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
 912			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
 913			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
 914			data_race(READ_ONCE(rnp->n_boosts)));
 915		if (!rcu_is_leaf_node(rnp))
 916			continue;
 917		for_each_leaf_node_possible_cpu(rnp, cpu) {
 918			rdp = per_cpu_ptr(&rcu_data, cpu);
 919			if (READ_ONCE(rdp->gpwrap) ||
 920			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
 921					 READ_ONCE(rdp->gp_seq_needed)))
 922				continue;
 923			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
 924				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
 925		}
 926	}
 927	for_each_possible_cpu(cpu) {
 928		rdp = per_cpu_ptr(&rcu_data, cpu);
 929		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
 930		if (rcu_segcblist_is_offloaded(&rdp->cblist))
 931			show_rcu_nocb_state(rdp);
 932	}
 933	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
 934	show_rcu_tasks_gp_kthreads();
 935}
 936EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 937
 938/*
 939 * This function checks for grace-period requests that fail to motivate
 940 * RCU to come out of its idle mode.
 941 */
 942static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 943				     const unsigned long gpssdelay)
 944{
 945	unsigned long flags;
 946	unsigned long j;
 947	struct rcu_node *rnp_root = rcu_get_root();
 948	static atomic_t warned = ATOMIC_INIT(0);
 949
 950	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
 951	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
 952			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 953	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
 954		return;
 955	j = jiffies; /* Expensive access, and in common case don't get here. */
 956	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 957	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 958	    atomic_read(&warned))
 959		return;
 960
 961	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 962	j = jiffies;
 963	if (rcu_gp_in_progress() ||
 964	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
 965			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 966	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 967	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 968	    atomic_read(&warned)) {
 969		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 970		return;
 971	}
 972	/* Hold onto the leaf lock to make others see warned==1. */
 973
 974	if (rnp_root != rnp)
 975		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 976	j = jiffies;
 977	if (rcu_gp_in_progress() ||
 978	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
 979			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 980	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 981	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 982	    atomic_xchg(&warned, 1)) {
 983		if (rnp_root != rnp)
 984			/* irqs remain disabled. */
 985			raw_spin_unlock_rcu_node(rnp_root);
 986		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 987		return;
 988	}
 989	WARN_ON(1);
 990	if (rnp_root != rnp)
 991		raw_spin_unlock_rcu_node(rnp_root);
 992	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 993	show_rcu_gp_kthreads();
 994}
 995
 996/*
 997 * Do a forward-progress check for rcutorture.  This is normally invoked
 998 * due to an OOM event.  The argument "j" gives the time period during
 999 * which rcutorture would like progress to have been made.
1000 */
1001void rcu_fwd_progress_check(unsigned long j)
1002{
1003	unsigned long cbs;
1004	int cpu;
1005	unsigned long max_cbs = 0;
1006	int max_cpu = -1;
1007	struct rcu_data *rdp;
1008
1009	if (rcu_gp_in_progress()) {
1010		pr_info("%s: GP age %lu jiffies\n",
1011			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
1012		show_rcu_gp_kthreads();
1013	} else {
1014		pr_info("%s: Last GP end %lu jiffies ago\n",
1015			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
1016		preempt_disable();
1017		rdp = this_cpu_ptr(&rcu_data);
1018		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
1019		preempt_enable();
1020	}
1021	for_each_possible_cpu(cpu) {
1022		cbs = rcu_get_n_cbs_cpu(cpu);
1023		if (!cbs)
1024			continue;
1025		if (max_cpu < 0)
1026			pr_info("%s: callbacks", __func__);
1027		pr_cont(" %d: %lu", cpu, cbs);
1028		if (cbs <= max_cbs)
1029			continue;
1030		max_cbs = cbs;
1031		max_cpu = cpu;
1032	}
1033	if (max_cpu >= 0)
1034		pr_cont("\n");
1035}
1036EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
1037
1038/* Commandeer a sysrq key to dump RCU's tree. */
1039static bool sysrq_rcu;
1040module_param(sysrq_rcu, bool, 0444);
1041
1042/* Dump grace-period-request information due to commandeered sysrq. */
1043static void sysrq_show_rcu(u8 key)
1044{
1045	show_rcu_gp_kthreads();
1046}
1047
1048static const struct sysrq_key_op sysrq_rcudump_op = {
1049	.handler = sysrq_show_rcu,
1050	.help_msg = "show-rcu(y)",
1051	.action_msg = "Show RCU tree",
1052	.enable_mask = SYSRQ_ENABLE_DUMP,
1053};
1054
1055static int __init rcu_sysrq_init(void)
1056{
1057	if (sysrq_rcu)
1058		return register_sysrq_key('y', &sysrq_rcudump_op);
1059	return 0;
1060}
1061early_initcall(rcu_sysrq_init);
1062
1063#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
1064
1065//////////////////////////////////////////////////////////////////////////////
1066//
1067// RCU CPU stall-warning notifiers
1068
1069static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);
1070
1071/**
1072 * rcu_stall_chain_notifier_register - Add an RCU CPU stall notifier
1073 * @n: Entry to add.
1074 *
1075 * Adds an RCU CPU stall notifier to an atomic notifier chain.
1076 * The @action passed to a notifier will be @RCU_STALL_NOTIFY_NORM or
1077 * friends.  The @data will be the duration of the stalled grace period,
1078 * in jiffies, coerced to a void* pointer.
1079 *
1080 * Returns 0 on success, %-EEXIST on error.
1081 */
1082int rcu_stall_chain_notifier_register(struct notifier_block *n)
1083{
1084	int rcsn = rcu_cpu_stall_notifiers;
1085
1086	WARN(1, "Adding %pS() to RCU stall notifier list (%s).\n", n->notifier_call,
1087	     rcsn ? "possibly suppressing RCU CPU stall warnings" : "failed, so all is well");
1088	if (rcsn)
1089		return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
1090	return -EEXIST;
1091}
1092EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_register);
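A hedged sketch of a client (hypothetical names; the @action and @data semantics follow the kerneldoc above, and a nonzero return from registration means stall notifiers are unavailable):

/* Hypothetical RCU CPU stall notifier, e.g. from a diagnostic module. */
static int example_rcu_stall_cb(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long duration = (unsigned long)data;	/* jiffies, per kerneldoc above */

	if (action == RCU_STALL_NOTIFY_NORM)
		pr_info("RCU stall notifier: grace period stalled for %lu jiffies\n",
			duration);
	return NOTIFY_OK;
}

static struct notifier_block example_rcu_stall_nb = {
	.notifier_call = example_rcu_stall_cb,
};

/* In module init (hypothetical):
 *	if (rcu_stall_chain_notifier_register(&example_rcu_stall_nb))
 *		pr_warn("RCU CPU stall notifiers are not available\n");
 */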
1093
1094/**
1095 * rcu_stall_chain_notifier_unregister - Remove an RCU CPU stall notifier
1096 * @n: Entry to remove.
1097 *
1098 * Removes an RCU CPU stall notifier from an atomic notifier chain.
1099 *
1100 * Returns zero on success, %-ENOENT on failure.
1101 */
1102int rcu_stall_chain_notifier_unregister(struct notifier_block *n)
1103{
1104	return atomic_notifier_chain_unregister(&rcu_cpu_stall_notifier_list, n);
1105}
1106EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_unregister);
1107
1108/*
1109 * rcu_stall_notifier_call_chain - Call functions in an RCU CPU stall notifier chain
1110 * @val: Value passed unmodified to notifier function
1111 * @v: Pointer passed unmodified to notifier function
1112 *
1113 * Calls each function in the RCU CPU stall notifier chain in turn, which
1114 * is an atomic call chain.  See atomic_notifier_call_chain() for more
1115 * information.
1116 *
1117 * This is for use within RCU, hence the omission of the extra asterisk
1118 * to indicate a non-kerneldoc format header comment.
1119 */
1120int rcu_stall_notifier_call_chain(unsigned long val, void *v)
1121{
1122	return atomic_notifier_call_chain(&rcu_cpu_stall_notifier_list, val, v);
1123}
1124
1125#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER