Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
   4 *
   5 * Copyright IBM Corporation, 2008
   6 *
   7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   8 *	    Manfred Spraul <manfred@colorfullife.com>
   9 *	    Paul E. McKenney <paulmck@linux.ibm.com>
  10 *
  11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
  12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  13 *
  14 * For detailed explanation of Read-Copy Update mechanism see -
  15 *	Documentation/RCU
  16 */
  17
  18#define pr_fmt(fmt) "rcu: " fmt
  19
  20#include <linux/types.h>
  21#include <linux/kernel.h>
  22#include <linux/init.h>
  23#include <linux/spinlock.h>
  24#include <linux/smp.h>
  25#include <linux/rcupdate_wait.h>
  26#include <linux/interrupt.h>
  27#include <linux/sched.h>
  28#include <linux/sched/debug.h>
  29#include <linux/nmi.h>
  30#include <linux/atomic.h>
  31#include <linux/bitops.h>
  32#include <linux/export.h>
  33#include <linux/completion.h>
  34#include <linux/kmemleak.h>
  35#include <linux/moduleparam.h>
  36#include <linux/panic.h>
  37#include <linux/panic_notifier.h>
  38#include <linux/percpu.h>
  39#include <linux/notifier.h>
  40#include <linux/cpu.h>
  41#include <linux/mutex.h>
  42#include <linux/time.h>
  43#include <linux/kernel_stat.h>
  44#include <linux/wait.h>
  45#include <linux/kthread.h>
  46#include <uapi/linux/sched/types.h>
  47#include <linux/prefetch.h>
  48#include <linux/delay.h>
 
  49#include <linux/random.h>
  50#include <linux/trace_events.h>
  51#include <linux/suspend.h>
  52#include <linux/ftrace.h>
  53#include <linux/tick.h>
  54#include <linux/sysrq.h>
  55#include <linux/kprobes.h>
  56#include <linux/gfp.h>
  57#include <linux/oom.h>
  58#include <linux/smpboot.h>
  59#include <linux/jiffies.h>
  60#include <linux/slab.h>
  61#include <linux/sched/isolation.h>
  62#include <linux/sched/clock.h>
  63#include <linux/vmalloc.h>
  64#include <linux/mm.h>
  65#include <linux/kasan.h>
  66#include <linux/context_tracking.h>
  67#include "../time/tick-internal.h"
  68
  69#include "tree.h"
  70#include "rcu.h"
  71
 
  72#ifdef MODULE_PARAM_PREFIX
  73#undef MODULE_PARAM_PREFIX
  74#endif
  75#define MODULE_PARAM_PREFIX "rcutree."
  76
  77/* Data structures. */
  78
  79static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
  80	.gpwrap = true,
  81#ifdef CONFIG_RCU_NOCB_CPU
  82	.cblist.flags = SEGCBLIST_RCU_CORE,
  83#endif
  84};
  85static struct rcu_state rcu_state = {
  86	.level = { &rcu_state.node[0] },
  87	.gp_state = RCU_GP_IDLE,
  88	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
  89	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
  90	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
  91	.name = RCU_NAME,
  92	.abbr = RCU_ABBR,
  93	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
  94	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
  95	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
  96};
  97
  98/* Dump rcu_node combining tree at boot to verify correct setup. */
  99static bool dump_tree;
 100module_param(dump_tree, bool, 0444);
 101/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
 102static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
 103#ifndef CONFIG_PREEMPT_RT
 104module_param(use_softirq, bool, 0444);
 105#endif
 106/* Control rcu_node-tree auto-balancing at boot time. */
 107static bool rcu_fanout_exact;
 108module_param(rcu_fanout_exact, bool, 0444);
 109/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
 110static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 111module_param(rcu_fanout_leaf, int, 0444);
 112int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 113/* Number of rcu_nodes at specified level. */
 114int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 
 
 
 
 
 115int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 116
 117/*
 118 * The rcu_scheduler_active variable is initialized to the value
 119 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
 120 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 121 * RCU can assume that there is but one task, allowing RCU to (for example)
 122 * optimize synchronize_rcu() to a simple barrier().  When this variable
 123 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 124 * to detect real grace periods.  This variable is also used to suppress
 125 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 126 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 127 * is fully initialized, including all of its kthreads having been spawned.
 128 */
 129int rcu_scheduler_active __read_mostly;
 130EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 131
 132/*
 133 * The rcu_scheduler_fully_active variable transitions from zero to one
 134 * during the early_initcall() processing, which is after the scheduler
 135 * is capable of creating new tasks.  So RCU processing (for example,
 136 * creating tasks for RCU priority boosting) must be delayed until after
 137 * rcu_scheduler_fully_active transitions from zero to one.  We also
 138 * currently delay invocation of any RCU callbacks until after this point.
 139 *
 140 * It might later prove better for people registering RCU callbacks during
 141 * early boot to take responsibility for these callbacks, but one step at
 142 * a time.
 143 */
 144static int rcu_scheduler_fully_active __read_mostly;
 145
 146static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 147			      unsigned long gps, unsigned long flags);
 148static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 149static void invoke_rcu_core(void);
 150static void rcu_report_exp_rdp(struct rcu_data *rdp);
 151static void sync_sched_exp_online_cleanup(int cpu);
 152static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
 153static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
 154static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
 155static bool rcu_init_invoked(void);
 156static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 157static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 158
 159/*
 160 * rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
 161 * real-time priority(enabling/disabling) is controlled by
 162 * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration.
 163 */
 164static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
 165module_param(kthread_prio, int, 0444);
 166
 167/* Delay in jiffies for grace-period initialization delays, debug only. */
 168
 169static int gp_preinit_delay;
 170module_param(gp_preinit_delay, int, 0444);
 171static int gp_init_delay;
 172module_param(gp_init_delay, int, 0444);
 173static int gp_cleanup_delay;
 174module_param(gp_cleanup_delay, int, 0444);
 175
 176// Add delay to rcu_read_unlock() for strict grace periods.
 177static int rcu_unlock_delay;
 178#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
 179module_param(rcu_unlock_delay, int, 0444);
 180#endif
 181
 182/*
 183 * This rcu parameter is runtime-read-only. It reflects
 184 * a minimum allowed number of objects which can be cached
 185 * per-CPU. Object size is equal to one page. This value
 186 * can be changed at boot time.
 187 */
 188static int rcu_min_cached_objs = 5;
 189module_param(rcu_min_cached_objs, int, 0444);
 
 
 190
 191// A page shrinker can ask for pages to be freed to make them
 192// available for other parts of the system. This usually happens
 193// under low memory conditions, and in that case we should also
 194// defer page-cache filling for a short time period.
 195//
 196// The default value is 5 seconds, which is long enough to reduce
 197// interference with the shrinker while it asks other systems to
 198// drain their caches.
 199static int rcu_delay_page_cache_fill_msec = 5000;
 200module_param(rcu_delay_page_cache_fill_msec, int, 0444);
 201
 202/* Retrieve RCU kthreads priority for rcutorture */
 203int rcu_get_gp_kthreads_prio(void)
 204{
 205	return kthread_prio;
 206}
 207EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
 208
 209/*
 210 * Number of grace periods between delays, normalized by the duration of
 211 * the delay.  The longer the delay, the more the grace periods between
 212 * each delay.  The reason for this normalization is that it means that,
 213 * for non-zero delays, the overall slowdown of grace periods is constant
 214 * regardless of the duration of the delay.  This arrangement balances
 215 * the need for long delays to increase some race probabilities with the
 216 * need for fast grace periods to increase other race probabilities.
 217 */
 218#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
 
 219
 220/*
 221 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 222 * permit this function to be invoked without holding the root rcu_node
 223 * structure's ->lock, but of course results can be subject to change.
 224 */
 225static int rcu_gp_in_progress(void)
 226{
 227	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
 228}
 229
 230/*
 231 * Return the number of callbacks queued on the specified CPU.
 232 * Handles both the nocbs and normal cases.
 
 
 233 */
 234static long rcu_get_n_cbs_cpu(int cpu)
 235{
 236	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 237
 238	if (rcu_segcblist_is_enabled(&rdp->cblist))
 239		return rcu_segcblist_n_cbs(&rdp->cblist);
 240	return 0;
 241}
 242
 243void rcu_softirq_qs(void)
 244{
 245	rcu_qs();
 246	rcu_preempt_deferred_qs(current);
 247	rcu_tasks_qs(current, false);
 
 
 248}
 249
 250/*
 251 * Reset the current CPU's ->dynticks counter to indicate that the
 252 * newly onlined CPU is no longer in an extended quiescent state.
 253 * This will either leave the counter unchanged, or increment it
 254 * to the next non-quiescent value.
 255 *
 256 * The non-atomic test/increment sequence works because the upper bits
 257 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 258 * or when the corresponding CPU is offline.
 259 */
 260static void rcu_dynticks_eqs_online(void)
 261{
 262	if (ct_dynticks() & RCU_DYNTICKS_IDX)
 263		return;
 264	ct_state_inc(RCU_DYNTICKS_IDX);
 
 265}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 266
 267/*
 268 * Snapshot the ->dynticks counter with full ordering so as to allow
 269 * stable comparison of this counter with past and future snapshots.
 270 */
 271static int rcu_dynticks_snap(int cpu)
 272{
 273	smp_mb();  // Fundamental RCU ordering guarantee.
 274	return ct_dynticks_cpu_acquire(cpu);
 275}
 
 276
 277/*
 278 * Return true if the snapshot returned from rcu_dynticks_snap()
 279 * indicates that RCU is in an extended quiescent state.
 280 */
 281static bool rcu_dynticks_in_eqs(int snap)
 282{
 283	return !(snap & RCU_DYNTICKS_IDX);
 284}
 
 285
 286/*
 287 * Return true if the CPU corresponding to the specified rcu_data
 288 * structure has spent some time in an extended quiescent state since
 289 * rcu_dynticks_snap() returned the specified snapshot.
 290 */
 291static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
 292{
 293	return snap != rcu_dynticks_snap(rdp->cpu);
 294}
 
 295
 296/*
 297 * Return true if the referenced integer is zero while the specified
 298 * CPU remains within a single extended quiescent state.
 
 
 
 299 */
 300bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 301{
 302	int snap;
 303
 304	// If not quiescent, force back to earlier extended quiescent state.
 305	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
 306	smp_rmb(); // Order ->dynticks and *vp reads.
 307	if (READ_ONCE(*vp))
 308		return false;  // Non-zero, so report failure;
 309	smp_rmb(); // Order *vp read and ->dynticks re-read.
 310
 311	// If still in the same extended quiescent state, we are good!
 312	return snap == ct_dynticks_cpu(cpu);
 313}
 
 314
 315/*
 316 * Let the RCU core know that this CPU has gone through the scheduler,
 317 * which is a quiescent state.  This is called when the need for a
 318 * quiescent state is urgent, so we burn an atomic operation and full
 319 * memory barriers to let the RCU core know about it, regardless of what
 320 * this CPU might (or might not) do in the near future.
 321 *
 322 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 323 *
 324 * The caller must have disabled interrupts and must not be idle.
 325 */
 326notrace void rcu_momentary_dyntick_idle(void)
 327{
 328	int seq;
 329
 330	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
 331	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
 332	/* It is illegal to call this from idle state. */
 333	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
 334	rcu_preempt_deferred_qs(current);
 335}
 336EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
 337
 338/**
 339 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 340 *
 341 * If the current CPU is idle and running at a first-level (not nested)
 342 * interrupt, or directly, from idle, return true.
 343 *
 344 * The caller must have at least disabled IRQs.
 345 */
 346static int rcu_is_cpu_rrupt_from_idle(void)
 347{
 348	long nesting;
 349
 350	/*
 351	 * Usually called from the tick; but also used from smp_function_call()
 352	 * for expedited grace periods. This latter can result in running from
 353	 * the idle task, instead of an actual IPI.
 354	 */
 355	lockdep_assert_irqs_disabled();
 356
 357	/* Check for counter underflows */
 358	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
 359			 "RCU dynticks_nesting counter underflow!");
 360	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
 361			 "RCU dynticks_nmi_nesting counter underflow/zero!");
 362
 363	/* Are we at first interrupt nesting level? */
 364	nesting = ct_dynticks_nmi_nesting();
 365	if (nesting > 1)
 366		return false;
 367
 368	/*
 369	 * If we're not in an interrupt, we must be in the idle task!
 370	 */
 371	WARN_ON_ONCE(!nesting && !is_idle_task(current));
 372
 373	/* Does CPU appear to be idle from an RCU standpoint? */
 374	return ct_dynticks_nesting() == 0;
 375}
 376
 377#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
 378				// Maximum callbacks per rcu_do_batch ...
 379#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
 380static long blimit = DEFAULT_RCU_BLIMIT;
 381#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
 382static long qhimark = DEFAULT_RCU_QHIMARK;
 383#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
 384static long qlowmark = DEFAULT_RCU_QLOMARK;
 385#define DEFAULT_RCU_QOVLD_MULT 2
 386#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
 387static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
 388static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
 389
 390module_param(blimit, long, 0444);
 391module_param(qhimark, long, 0444);
 392module_param(qlowmark, long, 0444);
 393module_param(qovld, long, 0444);
 394
 395static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
 396static ulong jiffies_till_next_fqs = ULONG_MAX;
 397static bool rcu_kick_kthreads;
 398static int rcu_divisor = 7;
 399module_param(rcu_divisor, int, 0644);
 400
 401/* Force an exit from rcu_do_batch() after 3 milliseconds. */
 402static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
 403module_param(rcu_resched_ns, long, 0644);
 404
 405/*
 406 * How long the grace period must be before we start recruiting
 407 * quiescent-state help from rcu_note_context_switch().
 408 */
 409static ulong jiffies_till_sched_qs = ULONG_MAX;
 410module_param(jiffies_till_sched_qs, ulong, 0444);
 411static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
 412module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
 
 
 413
 414/*
 415 * Make sure that we give the grace-period kthread time to detect any
 416 * idle CPUs before taking active measures to force quiescent states.
 417 * However, don't go below 100 milliseconds, adjusted upwards for really
 418 * large systems.
 419 */
 420static void adjust_jiffies_till_sched_qs(void)
 
 421{
 422	unsigned long j;
 423
 424	/* If jiffies_till_sched_qs was specified, respect the request. */
 425	if (jiffies_till_sched_qs != ULONG_MAX) {
 426		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
 427		return;
 428	}
 429	/* Otherwise, set to third fqs scan, but bound below on large system. */
 430	j = READ_ONCE(jiffies_till_first_fqs) +
 431		      2 * READ_ONCE(jiffies_till_next_fqs);
 432	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
 433		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
 434	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
 435	WRITE_ONCE(jiffies_to_sched_qs, j);
 436}
 437
 438static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
 439{
 440	ulong j;
 441	int ret = kstrtoul(val, 0, &j);
 442
 443	if (!ret) {
 444		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
 445		adjust_jiffies_till_sched_qs();
 446	}
 447	return ret;
 448}
 449
 450static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
 
 
 
 451{
 452	ulong j;
 453	int ret = kstrtoul(val, 0, &j);
 454
 455	if (!ret) {
 456		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
 457		adjust_jiffies_till_sched_qs();
 458	}
 459	return ret;
 460}
 461
 462static const struct kernel_param_ops first_fqs_jiffies_ops = {
 463	.set = param_set_first_fqs_jiffies,
 464	.get = param_get_ulong,
 465};
 466
 467static const struct kernel_param_ops next_fqs_jiffies_ops = {
 468	.set = param_set_next_fqs_jiffies,
 469	.get = param_get_ulong,
 470};
 471
 472module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
 473module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 474module_param(rcu_kick_kthreads, bool, 0644);
 475
 476static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
 477static int rcu_pending(int user);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 478
 479/*
 480 * Return the number of RCU GPs completed thus far for debug & stats.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 481 */
 482unsigned long rcu_get_gp_seq(void)
 483{
 484	return READ_ONCE(rcu_state.gp_seq);
 
 
 
 
 
 485}
 486EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 487
 488/*
 489 * Return the number of RCU expedited batches completed thus far for
 490 * debug & stats.  Odd numbers mean that a batch is in progress, even
 491 * numbers mean idle.  The value returned will thus be roughly double
 492 * the cumulative batches since boot.
 
 
 
 493 */
 494unsigned long rcu_exp_batches_completed(void)
 495{
 496	return rcu_state.expedited_sequence;
 497}
 498EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
 499
 500/*
 501 * Return the root node of the rcu_state structure.
 
 
 
 
 
 
 
 
 
 
 
 
 
 502 */
 503static struct rcu_node *rcu_get_root(void)
 504{
 505	return &rcu_state.node[0];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 506}
 507
 508/*
 509 * Send along grace-period-related data for rcutorture diagnostics.
 
 
 
 
 510 */
 511void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 512			    unsigned long *gp_seq)
 513{
 514	switch (test_type) {
 515	case RCU_FLAVOR:
 516		*flags = READ_ONCE(rcu_state.gp_flags);
 517		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
 518		break;
 519	default:
 520		break;
 
 
 
 
 
 
 
 
 
 
 521	}
 522}
 523EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 524
 525#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
 526/*
 527 * An empty function that will trigger a reschedule on
 528 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 529 */
 530static void late_wakeup_func(struct irq_work *work)
 531{
 532}
 
 533
 534static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
 535	IRQ_WORK_INIT(late_wakeup_func);
 
 
 
 
 
 
 
 
 536
 537/*
 538 * If either:
 539 *
 540 * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
 541 * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
 542 *
 543 * In these cases the late RCU wake ups aren't supported in the resched loops and our
 544 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 545 * get re-enabled again.
 
 546 */
 547noinstr void rcu_irq_work_resched(void)
 548{
 549	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 550
 551	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
 552		return;
 553
 554	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
 555		return;
 
 
 
 
 556
 557	instrumentation_begin();
 558	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
 559		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
 560	}
 561	instrumentation_end();
 
 
 
 
 
 562}
 563#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
 564
 565#ifdef CONFIG_PROVE_RCU
 566/**
 567 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 568 */
 569void rcu_irq_exit_check_preempt(void)
 570{
 571	lockdep_assert_irqs_disabled();
 
 
 572
 573	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
 574			 "RCU dynticks_nesting counter underflow/zero!");
 575	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
 576			 DYNTICK_IRQ_NONIDLE,
 577			 "Bad RCU  dynticks_nmi_nesting counter\n");
 578	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
 579			 "RCU in extended quiescent state!");
 
 
 
 
 580}
 581#endif /* #ifdef CONFIG_PROVE_RCU */
 582
 583#ifdef CONFIG_NO_HZ_FULL
 584/**
 585 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 586 *
 587 * The scheduler tick is not normally enabled when CPUs enter the kernel
 588 * from nohz_full userspace execution.  After all, nohz_full userspace
 589 * execution is an RCU quiescent state and the time executing in the kernel
 590 * is quite short.  Except of course when it isn't.  And it is not hard to
 591 * cause a large system to spend tens of seconds or even minutes looping
 592 * in the kernel, which can cause a number of problems, include RCU CPU
 593 * stall warnings.
 594 *
 595 * Therefore, if a nohz_full CPU fails to report a quiescent state
 596 * in a timely manner, the RCU grace-period kthread sets that CPU's
 597 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 598 * exception will invoke this function, which will turn on the scheduler
 599 * tick, which will enable RCU to detect that CPU's quiescent states,
 600 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 601 * The tick will be disabled once a quiescent state is reported for
 602 * this CPU.
 603 *
 604 * Of course, in carefully tuned systems, there might never be an
 605 * interrupt or exception.  In that case, the RCU grace-period kthread
 606 * will eventually cause one to happen.  However, in less carefully
 607 * controlled environments, this function allows RCU to get what it
 608 * needs without creating otherwise useless interruptions.
 609 */
 610void __rcu_irq_enter_check_tick(void)
 611{
 612	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 613
 614	// If we're here from NMI there's nothing to do.
 615	if (in_nmi())
 616		return;
 
 
 
 
 
 
 
 617
 618	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
 619			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
 
 
 
 
 
 
 
 
 620
 621	if (!tick_nohz_full_cpu(rdp->cpu) ||
 622	    !READ_ONCE(rdp->rcu_urgent_qs) ||
 623	    READ_ONCE(rdp->rcu_forced_tick)) {
 624		// RCU doesn't need nohz_full help from this CPU, or it is
 625		// already getting that help.
 626		return;
 627	}
 628
 629	// We get here only when not in an extended quiescent state and
 630	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
 631	// already watching and (2) The fact that we are in an interrupt
 632	// handler and that the rcu_node lock is an irq-disabled lock
 633	// prevents self-deadlock.  So we can safely recheck under the lock.
 634	// Note that the nohz_full state currently cannot change.
 635	raw_spin_lock_rcu_node(rdp->mynode);
 636	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
 637		// A nohz_full CPU is in the kernel and RCU needs a
 638		// quiescent state.  Turn on the tick!
 639		WRITE_ONCE(rdp->rcu_forced_tick, true);
 640		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
 641	}
 642	raw_spin_unlock_rcu_node(rdp->mynode);
 643}
 644NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
 645#endif /* CONFIG_NO_HZ_FULL */
 646
 647/*
 648 * Check to see if any future non-offloaded RCU-related work will need
 649 * to be done by the current CPU, even if none need be done immediately,
 650 * returning 1 if so.  This function is part of the RCU implementation;
 651 * it is -not- an exported member of the RCU API.  This is used by
 652 * the idle-entry code to figure out whether it is safe to disable the
 653 * scheduler-clock interrupt.
 654 *
 655 * Just check whether or not this CPU has non-offloaded RCU callbacks
 656 * queued.
 657 */
 658int rcu_needs_cpu(void)
 659{
 660	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
 661		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
 662}
 663
 664/*
 665 * If any sort of urgency was applied to the current CPU (for example,
 666 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 667 * to get to a quiescent state, disable it.
 668 */
 669static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
 670{
 671	raw_lockdep_assert_held_rcu_node(rdp->mynode);
 672	WRITE_ONCE(rdp->rcu_urgent_qs, false);
 673	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
 674	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
 675		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
 676		WRITE_ONCE(rdp->rcu_forced_tick, false);
 677	}
 678}
 679
 680/**
 681 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
 682 *
 683 * Return @true if RCU is watching the running CPU and @false otherwise.
 684 * An @true return means that this CPU can safely enter RCU read-side
 685 * critical sections.
 686 *
 687 * Although calls to rcu_is_watching() from most parts of the kernel
 688 * will return @true, there are important exceptions.  For example, if the
 689 * current CPU is deep within its idle loop, in kernel entry/exit code,
 690 * or offline, rcu_is_watching() will return @false.
 691 *
 692 * Make notrace because it can be called by the internal functions of
 693 * ftrace, and making this notrace removes unnecessary recursion calls.
 694 */
 695notrace bool rcu_is_watching(void)
 696{
 697	bool ret;
 698
 699	preempt_disable_notrace();
 700	ret = !rcu_dynticks_curr_cpu_in_eqs();
 701	preempt_enable_notrace();
 702	return ret;
 703}
 704EXPORT_SYMBOL_GPL(rcu_is_watching);
 705
 
 
 706/*
 707 * If a holdout task is actually running, request an urgent quiescent
 708 * state from its CPU.  This is unsynchronized, so migrations can cause
 709 * the request to go to the wrong CPU.  Which is OK, all that will happen
 710 * is that the CPU's next context switch will be a bit slower and next
 711 * time around this task will generate another request.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 712 */
 713void rcu_request_urgent_qs_task(struct task_struct *t)
 714{
 715	int cpu;
 
 
 716
 717	barrier();
 718	cpu = task_cpu(t);
 719	if (!task_curr(t))
 720		return; /* This task is not running on that CPU. */
 721	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 
 
 
 
 722}
 
 723
 724/*
 725 * When trying to report a quiescent state on behalf of some other CPU,
 726 * it is our responsibility to check for and handle potential overflow
 727 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 728 * After all, the CPU might be in deep idle state, and thus executing no
 729 * code whatsoever.
 730 */
 731static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 732{
 733	raw_lockdep_assert_held_rcu_node(rnp);
 734	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
 735			 rnp->gp_seq))
 736		WRITE_ONCE(rdp->gpwrap, true);
 737	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
 738		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 739}
 740
 741/*
 742 * Snapshot the specified CPU's dynticks counter so that we can later
 743 * credit them with an implicit quiescent state.  Return 1 if this CPU
 744 * is in dynticks idle mode, which is an extended quiescent state.
 745 */
 746static int dyntick_save_progress_counter(struct rcu_data *rdp)
 
 747{
 748	rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
 749	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
 750		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 751		rcu_gpnum_ovf(rdp->mynode, rdp);
 752		return 1;
 753	}
 754	return 0;
 755}
 756
 757/*
 758 * Returns positive if the specified CPU has passed through a quiescent state
 759 * by virtue of being in or having passed through an dynticks idle state since
 760 * the last call to dyntick_save_progress_counter() for this same CPU, or by
 761 * virtue of having been offline.
 762 *
 763 * Returns negative if the specified CPU needs a force resched.
 764 *
 765 * Returns zero otherwise.
 766 */
 767static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 
 
 
 
 
 
 
 
 768{
 769	unsigned long jtsq;
 770	int ret = 0;
 771	struct rcu_node *rnp = rdp->mynode;
 
 
 772
 773	/*
 774	 * If the CPU passed through or entered a dynticks idle phase with
 775	 * no active irq/NMI handlers, then we can safely pretend that the CPU
 776	 * already acknowledged the request to pass through a quiescent
 777	 * state.  Either way, that CPU cannot possibly be in an RCU
 778	 * read-side critical section that started before the beginning
 779	 * of the current RCU grace period.
 780	 */
 781	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
 782		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 783		rcu_gpnum_ovf(rnp, rdp);
 784		return 1;
 785	}
 786
 787	/*
 788	 * Complain if a CPU that is considered to be offline from RCU's
 789	 * perspective has not yet reported a quiescent state.  After all,
 790	 * the offline CPU should have reported a quiescent state during
 791	 * the CPU-offline process, or, failing that, by rcu_gp_init()
 792	 * if it ran concurrently with either the CPU going offline or the
 793	 * last task on a leaf rcu_node structure exiting its RCU read-side
 794	 * critical section while all CPUs corresponding to that structure
 795	 * are offline.  This added warning detects bugs in any of these
 796	 * code paths.
 797	 *
 798	 * The rcu_node structure's ->lock is held here, which excludes
 799	 * the relevant portions the CPU-hotplug code, the grace-period
 800	 * initialization code, and the rcu_read_unlock() code paths.
 801	 *
 802	 * For more detail, please refer to the "Hotplug CPU" section
 803	 * of RCU's Requirements documentation.
 
 
 804	 */
 805	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
 806		struct rcu_node *rnp1;
 807
 808		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
 809			__func__, rnp->grplo, rnp->grphi, rnp->level,
 810			(long)rnp->gp_seq, (long)rnp->completedqs);
 811		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
 812			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
 813				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
 814		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
 815			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
 816			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
 817			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
 818		return 1; /* Break things loose after complaining. */
 819	}
 820
 821	/*
 822	 * A CPU running for an extended time within the kernel can
 823	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
 824	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
 825	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
 826	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
 827	 * variable are safe because the assignments are repeated if this
 828	 * CPU failed to pass through a quiescent state.  This code
 829	 * also checks .jiffies_resched in case jiffies_to_sched_qs
 830	 * is set way high.
 831	 */
 832	jtsq = READ_ONCE(jiffies_to_sched_qs);
 833	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
 834	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
 835	     time_after(jiffies, rcu_state.jiffies_resched) ||
 836	     rcu_state.cbovld)) {
 837		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
 838		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
 839		smp_store_release(&rdp->rcu_urgent_qs, true);
 840	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
 841		WRITE_ONCE(rdp->rcu_urgent_qs, true);
 842	}
 843
 844	/*
 845	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
 846	 * The above code handles this, but only for straight cond_resched().
 847	 * And some in-kernel loops check need_resched() before calling
 848	 * cond_resched(), which defeats the above code for CPUs that are
 849	 * running in-kernel with scheduling-clock interrupts disabled.
 850	 * So hit them over the head with the resched_cpu() hammer!
 851	 */
 852	if (tick_nohz_full_cpu(rdp->cpu) &&
 853	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
 854	     rcu_state.cbovld)) {
 855		WRITE_ONCE(rdp->rcu_urgent_qs, true);
 856		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
 857		ret = -1;
 858	}
 859
 860	/*
 861	 * If more than halfway to RCU CPU stall-warning time, invoke
 862	 * resched_cpu() more frequently to try to loosen things up a bit.
 863	 * Also check to see if the CPU is getting hammered with interrupts,
 864	 * but only once per grace period, just to keep the IPIs down to
 865	 * a dull roar.
 866	 */
 867	if (time_after(jiffies, rcu_state.jiffies_resched)) {
 868		if (time_after(jiffies,
 869			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
 870			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
 871			ret = -1;
 872		}
 873		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 874		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 875		    (rnp->ffmask & rdp->grpmask)) {
 876			rdp->rcu_iw_pending = true;
 877			rdp->rcu_iw_gp_seq = rnp->gp_seq;
 878			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
 879		}
 880
 881		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
 882			int cpu = rdp->cpu;
 883			struct rcu_snap_record *rsrp;
 884			struct kernel_cpustat *kcsp;
 885
 886			kcsp = &kcpustat_cpu(cpu);
 887
 888			rsrp = &rdp->snap_record;
 889			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
 890			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
 891			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
 892			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
 893			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
 894			rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
 895			rsrp->jiffies = jiffies;
 896			rsrp->gp_seq = rdp->gp_seq;
 897		}
 
 
 
 
 
 898	}
 899
 900	return ret;
 901}
 902
 903/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 904static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 905			      unsigned long gp_seq_req, const char *s)
 906{
 907	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
 908				      gp_seq_req, rnp->level,
 909				      rnp->grplo, rnp->grphi, s);
 
 
 
 
 
 910}
 911
 912/*
 913 * rcu_start_this_gp - Request the start of a particular grace period
 914 * @rnp_start: The leaf node of the CPU from which to start.
 915 * @rdp: The rcu_data corresponding to the CPU from which to start.
 916 * @gp_seq_req: The gp_seq of the grace period to start.
 917 *
 918 * Start the specified grace period, as needed to handle newly arrived
 919 * callbacks.  The required future grace periods are recorded in each
 920 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 921 * is reason to awaken the grace-period kthread.
 922 *
 923 * The caller must hold the specified rcu_node structure's ->lock, which
 924 * is why the caller is responsible for waking the grace-period kthread.
 925 *
 926 * Returns true if the GP thread needs to be awakened else false.
 927 */
 928static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 929			      unsigned long gp_seq_req)
 930{
 931	bool ret = false;
 
 932	struct rcu_node *rnp;
 933
 934	/*
 935	 * Use funnel locking to either acquire the root rcu_node
 936	 * structure's lock or bail out if the need for this grace period
 937	 * has already been recorded -- or if that grace period has in
 938	 * fact already started.  If there is already a grace period in
 939	 * progress in a non-leaf node, no recording is needed because the
 940	 * end of the grace period will scan the leaf rcu_node structures.
 941	 * Note that rnp_start->lock must not be released.
 942	 */
 943	raw_lockdep_assert_held_rcu_node(rnp_start);
 944	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
 945	for (rnp = rnp_start; 1; rnp = rnp->parent) {
 946		if (rnp != rnp_start)
 947			raw_spin_lock_rcu_node(rnp);
 948		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
 949		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
 950		    (rnp != rnp_start &&
 951		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
 952			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
 953					  TPS("Prestarted"));
 954			goto unlock_out;
 955		}
 956		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
 957		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
 958			/*
 959			 * We just marked the leaf or internal node, and a
 960			 * grace period is in progress, which means that
 961			 * rcu_gp_cleanup() will see the marking.  Bail to
 962			 * reduce contention.
 963			 */
 964			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
 965					  TPS("Startedleaf"));
 966			goto unlock_out;
 967		}
 968		if (rnp != rnp_start && rnp->parent != NULL)
 969			raw_spin_unlock_rcu_node(rnp);
 970		if (!rnp->parent)
 971			break;  /* At root, and perhaps also leaf. */
 972	}
 
 973
 974	/* If GP already in progress, just leave, otherwise start one. */
 975	if (rcu_gp_in_progress()) {
 976		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
 977		goto unlock_out;
 978	}
 979	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
 980	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
 981	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
 982	if (!READ_ONCE(rcu_state.gp_kthread)) {
 983		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
 984		goto unlock_out;
 985	}
 986	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
 987	ret = true;  /* Caller must wake GP kthread. */
 988unlock_out:
 989	/* Push furthest requested GP to leaf node and rcu_data structure. */
 990	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
 991		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
 992		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 993	}
 994	if (rnp != rnp_start)
 995		raw_spin_unlock_rcu_node(rnp);
 996	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 997}
 998
 999/*
1000 * Clean up any old requests for the just-ended grace period.  Also return
1001 * whether any additional grace periods have been requested.
1002 */
1003static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1004{
1005	bool needmore;
1006	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1007
1008	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1009	if (!needmore)
1010		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1011	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1012			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1013	return needmore;
1014}
1015
1016static void swake_up_one_online_ipi(void *arg)
1017{
1018	struct swait_queue_head *wqh = arg;
 
 
 
1019
1020	swake_up_one(wqh);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1021}
1022
1023static void swake_up_one_online(struct swait_queue_head *wqh)
1024{
1025	int cpu = get_cpu();
 
 
 
 
 
 
 
 
 
1026
1027	/*
1028	 * If called from rcutree_report_cpu_starting(), wake up
1029	 * is dangerous that late in the CPU-down hotplug process. The
1030	 * scheduler might queue an ignored hrtimer. Defer the wake up
1031	 * to an online CPU instead.
 
 
 
 
 
 
 
 
 
 
 
1032	 */
1033	if (unlikely(cpu_is_offline(cpu))) {
1034		int target;
 
 
 
 
 
 
 
 
 
 
 
 
1035
1036		target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
1037					 cpu_online_mask);
1038
1039		smp_call_function_single(target, swake_up_one_online_ipi,
1040					 wqh, 0);
1041		put_cpu();
1042	} else {
1043		put_cpu();
1044		swake_up_one(wqh);
1045	}
1046}
1047
1048/*
1049 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1050 * interrupt or softirq handler, in which case we just might immediately
1051 * sleep upon return, resulting in a grace-period hang), and don't bother
1052 * awakening when there is nothing for the grace-period kthread to do
1053 * (as in several CPUs raced to awaken, we lost), and finally don't try
1054 * to awaken a kthread that has not yet been created.  If all those checks
1055 * are passed, track some debug information and awaken.
1056 *
1057 * So why do the self-wakeup when in an interrupt or softirq handler
1058 * in the grace-period kthread's context?  Because the kthread might have
1059 * been interrupted just as it was going to sleep, and just after the final
1060 * pre-sleep check of the awaken condition.  In this case, a wakeup really
1061 * is required, and is therefore supplied.
1062 */
1063static void rcu_gp_kthread_wake(void)
1064{
1065	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
 
 
 
1066
1067	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1068	    !READ_ONCE(rcu_state.gp_flags) || !t)
 
 
 
 
 
 
1069		return;
1070	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1071	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1072	swake_up_one_online(&rcu_state.gp_wq);
1073}
1074
1075/*
1076 * If there is room, assign a ->gp_seq number to any callbacks on this
1077 * CPU that have not already been assigned.  Also accelerate any callbacks
1078 * that were previously assigned a ->gp_seq number that has since proven
1079 * to be too conservative, which can happen if callbacks get assigned a
1080 * ->gp_seq number while RCU is idle, but with reference to a non-root
1081 * rcu_node structure.  This function is idempotent, so it does not hurt
1082 * to call it repeatedly.  Returns an flag saying that we should awaken
1083 * the RCU grace-period kthread.
1084 *
1085 * The caller must hold rnp->lock with interrupts disabled.
1086 */
1087static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 
1088{
1089	unsigned long gp_seq_req;
1090	bool ret = false;
 
 
 
 
 
 
 
1091
1092	rcu_lockdep_assert_cblist_protected(rdp);
1093	raw_lockdep_assert_held_rcu_node(rnp);
 
 
 
 
1094
1095	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1096	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1097		return false;
 
 
 
 
 
 
 
 
1098
1099	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
 
 
 
 
 
 
 
 
 
 
 
 
1100
1101	/*
1102	 * Callbacks are often registered with incomplete grace-period
1103	 * information.  Something about the fact that getting exact
1104	 * information requires acquiring a global lock...  RCU therefore
1105	 * makes a conservative estimate of the grace period number at which
1106	 * a given callback will become ready to invoke.	The following
1107	 * code checks this estimate and improves it when possible, thus
1108	 * accelerating callback invocation to an earlier grace-period
1109	 * number.
1110	 */
1111	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1112	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1113		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
 
 
1114
1115	/* Trace depending on how much we were able to accelerate. */
1116	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1117		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1118	else
1119		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
 
 
 
 
 
 
 
 
1120
1121	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
 
 
 
 
 
 
 
 
1122
1123	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1124}
1125
1126/*
1127 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1128 * rcu_node structure's ->lock be held.  It consults the cached value
1129 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1130 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1131 * while holding the leaf rcu_node structure's ->lock.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1132 */
1133static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1134					struct rcu_data *rdp)
1135{
1136	unsigned long c;
1137	bool needwake;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1138
1139	rcu_lockdep_assert_cblist_protected(rdp);
1140	c = rcu_seq_snap(&rcu_state.gp_seq);
1141	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1142		/* Old request still live, so mark recent callbacks. */
1143		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
 
 
1144		return;
 
 
 
 
 
 
 
 
 
1145	}
1146	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1147	needwake = rcu_accelerate_cbs(rnp, rdp);
1148	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1149	if (needwake)
1150		rcu_gp_kthread_wake();
 
 
 
1151}
1152
1153/*
1154 * Move any callbacks whose grace period has completed to the
1155 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1156 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1157 * sublist.  This function is idempotent, so it does not hurt to
1158 * invoke it repeatedly.  As long as it is not invoked -too- often...
1159 * Returns true if the RCU grace-period kthread needs to be awakened.
1160 *
1161 * The caller must hold rnp->lock with interrupts disabled.
1162 */
1163static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 
1164{
1165	rcu_lockdep_assert_cblist_protected(rdp);
1166	raw_lockdep_assert_held_rcu_node(rnp);
1167
1168	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1169	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1170		return false;
1171
1172	/*
1173	 * Find all callbacks whose ->gp_seq numbers indicate that they
1174	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1175	 */
1176	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1177
1178	/* Classify any remaining callbacks. */
1179	return rcu_accelerate_cbs(rnp, rdp);
1180}
1181
1182/*
1183 * Move and classify callbacks, but only if doing so won't require
1184 * that the RCU grace-period kthread be awakened.
1185 */
1186static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1187						  struct rcu_data *rdp)
1188{
1189	rcu_lockdep_assert_cblist_protected(rdp);
1190	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1191		return;
1192	// The grace period cannot end while we hold the rcu_node lock.
1193	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1194		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1195	raw_spin_unlock_rcu_node(rnp);
1196}
1197
1198/*
1199 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1200 * quiescent state.  This is intended to be invoked when the CPU notices
1201 * a new grace period.
1202 */
1203static void rcu_strict_gp_check_qs(void)
1204{
1205	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1206		rcu_read_lock();
1207		rcu_read_unlock();
1208	}
1209}
1210
1211/*
1212 * Update CPU-local rcu_data state to record the beginnings and ends of
1213 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1214 * structure corresponding to the current CPU, and must have irqs disabled.
1215 * Returns true if the grace-period kthread needs to be awakened.
1216 */
1217static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1218{
1219	bool ret = false;
1220	bool need_qs;
1221	const bool offloaded = rcu_rdp_is_offloaded(rdp);
1222
1223	raw_lockdep_assert_held_rcu_node(rnp);
1224
1225	if (rdp->gp_seq == rnp->gp_seq)
1226		return false; /* Nothing to do. */
1227
1228	/* Handle the ends of any preceding grace periods first. */
1229	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1230	    unlikely(READ_ONCE(rdp->gpwrap))) {
1231		if (!offloaded)
1232			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1233		rdp->core_needs_qs = false;
1234		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1235	} else {
1236		if (!offloaded)
1237			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1238		if (rdp->core_needs_qs)
1239			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
 
 
 
1240	}
1241
1242	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1243	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1244	    unlikely(READ_ONCE(rdp->gpwrap))) {
1245		/*
1246		 * If the current grace period is waiting for this CPU,
1247		 * set up to detect a quiescent state, otherwise don't
1248		 * go looking for one.
1249		 */
1250		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1251		need_qs = !!(rnp->qsmask & rdp->grpmask);
1252		rdp->cpu_no_qs.b.norm = need_qs;
1253		rdp->core_needs_qs = need_qs;
1254		zero_cpu_stall_ticks(rdp);
1255	}
1256	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1257	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1258		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1259	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1260		WRITE_ONCE(rdp->last_sched_clock, jiffies);
1261	WRITE_ONCE(rdp->gpwrap, false);
1262	rcu_gpnum_ovf(rnp, rdp);
1263	return ret;
1264}
1265
1266static void note_gp_changes(struct rcu_data *rdp)
1267{
1268	unsigned long flags;
1269	bool needwake;
1270	struct rcu_node *rnp;
1271
1272	local_irq_save(flags);
1273	rnp = rdp->mynode;
1274	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1275	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1276	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1277		local_irq_restore(flags);
1278		return;
1279	}
1280	needwake = __note_gp_changes(rnp, rdp);
1281	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1282	rcu_strict_gp_check_qs();
1283	if (needwake)
1284		rcu_gp_kthread_wake();
1285}
1286
1287static atomic_t *rcu_gp_slow_suppress;
1288
1289/* Register a counter to suppress debugging grace-period delays. */
1290void rcu_gp_slow_register(atomic_t *rgssp)
1291{
1292	WARN_ON_ONCE(rcu_gp_slow_suppress);
1293
1294	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1295}
1296EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1297
1298/* Unregister a counter, with NULL for not caring which. */
1299void rcu_gp_slow_unregister(atomic_t *rgssp)
1300{
1301	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
1302
1303	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1304}
1305EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1306
1307static bool rcu_gp_slow_is_suppressed(void)
1308{
1309	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1310
1311	return rgssp && atomic_read(rgssp);
1312}
1313
1314static void rcu_gp_slow(int delay)
1315{
1316	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1317	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1318		schedule_timeout_idle(delay);
1319}
1320
1321static unsigned long sleep_duration;
1322
1323/* Allow rcutorture to stall the grace-period kthread. */
1324void rcu_gp_set_torture_wait(int duration)
1325{
1326	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1327		WRITE_ONCE(sleep_duration, duration);
1328}
1329EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1330
1331/* Actually implement the aforementioned wait. */
1332static void rcu_gp_torture_wait(void)
1333{
1334	unsigned long duration;
1335
1336	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1337		return;
1338	duration = xchg(&sleep_duration, 0UL);
1339	if (duration > 0) {
1340		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1341		schedule_timeout_idle(duration);
1342		pr_alert("%s: Wait complete\n", __func__);
1343	}
1344}
1345
1346/*
1347 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1348 * processing.
1349 */
1350static void rcu_strict_gp_boundary(void *unused)
1351{
1352	invoke_rcu_core();
1353}
1354
1355// Make the polled API aware of the beginning of a grace period.
1356static void rcu_poll_gp_seq_start(unsigned long *snap)
1357{
1358	struct rcu_node *rnp = rcu_get_root();
1359
1360	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1361		raw_lockdep_assert_held_rcu_node(rnp);
1362
1363	// If RCU was idle, note beginning of GP.
1364	if (!rcu_seq_state(rcu_state.gp_seq_polled))
1365		rcu_seq_start(&rcu_state.gp_seq_polled);
1366
1367	// Either way, record current state.
1368	*snap = rcu_state.gp_seq_polled;
1369}
1370
1371// Make the polled API aware of the end of a grace period.
1372static void rcu_poll_gp_seq_end(unsigned long *snap)
1373{
1374	struct rcu_node *rnp = rcu_get_root();
1375
1376	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1377		raw_lockdep_assert_held_rcu_node(rnp);
1378
1379	// If the previously noted GP is still in effect, record the
1380	// end of that GP.  Either way, zero counter to avoid counter-wrap
1381	// problems.
1382	if (*snap && *snap == rcu_state.gp_seq_polled) {
1383		rcu_seq_end(&rcu_state.gp_seq_polled);
1384		rcu_state.gp_seq_polled_snap = 0;
1385		rcu_state.gp_seq_polled_exp_snap = 0;
1386	} else {
1387		*snap = 0;
1388	}
1389}
1390
1391// Make the polled API aware of the beginning of a grace period, but
1392// where caller does not hold the root rcu_node structure's lock.
1393static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1394{
1395	unsigned long flags;
1396	struct rcu_node *rnp = rcu_get_root();
1397
1398	if (rcu_init_invoked()) {
1399		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1400			lockdep_assert_irqs_enabled();
1401		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1402	}
1403	rcu_poll_gp_seq_start(snap);
1404	if (rcu_init_invoked())
1405		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1406}
1407
1408// Make the polled API aware of the end of a grace period, but where
1409// caller does not hold the root rcu_node structure's lock.
1410static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1411{
1412	unsigned long flags;
1413	struct rcu_node *rnp = rcu_get_root();
1414
1415	if (rcu_init_invoked()) {
1416		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1417			lockdep_assert_irqs_enabled();
1418		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1419	}
1420	rcu_poll_gp_seq_end(snap);
1421	if (rcu_init_invoked())
1422		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1423}
1424
1425/*
1426 * Initialize a new grace period.  Return false if no grace period required.
1427 */
1428static noinline_for_stack bool rcu_gp_init(void)
1429{
1430	unsigned long flags;
1431	unsigned long oldmask;
1432	unsigned long mask;
1433	struct rcu_data *rdp;
1434	struct rcu_node *rnp = rcu_get_root();
1435
1436	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1437	raw_spin_lock_irq_rcu_node(rnp);
1438	if (!READ_ONCE(rcu_state.gp_flags)) {
 
1439		/* Spurious wakeup, tell caller to go back to sleep.  */
1440		raw_spin_unlock_irq_rcu_node(rnp);
1441		return false;
1442	}
1443	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1444
1445	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1446		/*
1447		 * Grace period already in progress, don't start another.
1448		 * Not supposed to be able to happen.
1449		 */
1450		raw_spin_unlock_irq_rcu_node(rnp);
1451		return false;
1452	}
1453
1454	/* Advance to a new grace period and initialize state. */
1455	record_gp_stall_check_time();
1456	/* Record GP times before starting GP, hence rcu_seq_start(). */
1457	rcu_seq_start(&rcu_state.gp_seq);
1458	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1459	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1460	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1461	raw_spin_unlock_irq_rcu_node(rnp);
1462
1463	/*
1464	 * Apply per-leaf buffered online and offline operations to
1465	 * the rcu_node tree. Note that this new grace period need not
1466	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1467	 * offlining path, when combined with checks in this function,
1468	 * will handle CPUs that are currently going offline or that will
1469	 * go offline later.  Please also refer to "Hotplug CPU" section
1470	 * of RCU's Requirements documentation.
1471	 */
1472	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1473	/* Exclude CPU hotplug operations. */
1474	rcu_for_each_leaf_node(rnp) {
1475		local_irq_save(flags);
1476		arch_spin_lock(&rcu_state.ofl_lock);
1477		raw_spin_lock_rcu_node(rnp);
1478		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1479		    !rnp->wait_blkd_tasks) {
1480			/* Nothing to do on this leaf rcu_node structure. */
1481			raw_spin_unlock_rcu_node(rnp);
1482			arch_spin_unlock(&rcu_state.ofl_lock);
1483			local_irq_restore(flags);
1484			continue;
1485		}
1486
1487		/* Record old state, apply changes to ->qsmaskinit field. */
1488		oldmask = rnp->qsmaskinit;
1489		rnp->qsmaskinit = rnp->qsmaskinitnext;
1490
1491		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1492		if (!oldmask != !rnp->qsmaskinit) {
1493			if (!oldmask) { /* First online CPU for rcu_node. */
1494				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1495					rcu_init_new_rnp(rnp);
1496			} else if (rcu_preempt_has_tasks(rnp)) {
1497				rnp->wait_blkd_tasks = true; /* blocked tasks */
1498			} else { /* Last offline CPU and can propagate. */
1499				rcu_cleanup_dead_rnp(rnp);
1500			}
1501		}
1502
1503		/*
1504		 * If all waited-on tasks from prior grace period are
1505		 * done, and if all this rcu_node structure's CPUs are
1506		 * still offline, propagate up the rcu_node tree and
1507		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1508		 * rcu_node structure's CPUs has since come back online,
1509		 * simply clear ->wait_blkd_tasks.
1510		 */
1511		if (rnp->wait_blkd_tasks &&
1512		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1513			rnp->wait_blkd_tasks = false;
1514			if (!rnp->qsmaskinit)
1515				rcu_cleanup_dead_rnp(rnp);
1516		}
1517
1518		raw_spin_unlock_rcu_node(rnp);
1519		arch_spin_unlock(&rcu_state.ofl_lock);
1520		local_irq_restore(flags);
1521	}
1522	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1523
1524	/*
1525	 * Set the quiescent-state-needed bits in all the rcu_node
1526	 * structures for all currently online CPUs in breadth-first
1527	 * order, starting from the root rcu_node structure, relying on the
1528	 * layout of the tree within the rcu_state.node[] array.  Note that
1529	 * other CPUs will access only the leaves of the hierarchy, thus
1530	 * seeing that no grace period is in progress, at least until the
1531	 * corresponding leaf node has been initialized.
 
1532	 *
1533	 * The grace period cannot complete until the initialization
1534	 * process finishes, because this kthread handles both.
1535	 */
1536	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1537	rcu_for_each_node_breadth_first(rnp) {
1538		rcu_gp_slow(gp_init_delay);
1539		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1540		rdp = this_cpu_ptr(&rcu_data);
1541		rcu_preempt_check_blocked_tasks(rnp);
1542		rnp->qsmask = rnp->qsmaskinit;
1543		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
 
 
1544		if (rnp == rdp->mynode)
1545			(void)__note_gp_changes(rnp, rdp);
1546		rcu_preempt_boost_start_gp(rnp);
1547		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1548					    rnp->level, rnp->grplo,
1549					    rnp->grphi, rnp->qsmask);
1550		/* Quiescent states for tasks on any now-offline CPUs. */
1551		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1552		rnp->rcu_gp_init_mask = mask;
1553		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1554			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1555		else
1556			raw_spin_unlock_irq_rcu_node(rnp);
1557		cond_resched_tasks_rcu_qs();
1558		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1559	}
1560
1561	// If strict, make all CPUs aware of new grace period.
1562	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1563		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1564
1565	return true;
1566}
1567
1568/*
1569 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1570 * time.
1571 */
1572static bool rcu_gp_fqs_check_wake(int *gfp)
1573{
1574	struct rcu_node *rnp = rcu_get_root();
1575
1576	// If under overload conditions, force an immediate FQS scan.
1577	if (*gfp & RCU_GP_FLAG_OVLD)
1578		return true;
1579
1580	// Someone like call_rcu() requested a force-quiescent-state scan.
1581	*gfp = READ_ONCE(rcu_state.gp_flags);
1582	if (*gfp & RCU_GP_FLAG_FQS)
1583		return true;
1584
1585	// The current grace period has completed.
1586	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1587		return true;
1588
1589	return false;
1590}
1591
1592/*
1593 * Do one round of quiescent-state forcing.
1594 */
1595static void rcu_gp_fqs(bool first_time)
1596{
1597	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1598	struct rcu_node *rnp = rcu_get_root();
1599
1600	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1601	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1602
1603	WARN_ON_ONCE(nr_fqs > 3);
1604	/* Only countdown nr_fqs for stall purposes if jiffies moves. */
1605	if (nr_fqs) {
1606		if (nr_fqs == 1) {
1607			WRITE_ONCE(rcu_state.jiffies_stall,
1608				   jiffies + rcu_jiffies_till_stall_check());
1609		}
1610		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1611	}
1612
1613	if (first_time) {
1614		/* Collect dyntick-idle snapshots. */
1615		force_qs_rnp(dyntick_save_progress_counter);
 
 
 
 
 
 
 
1616	} else {
1617		/* Handle dyntick-idle and offline CPUs. */
1618		force_qs_rnp(rcu_implicit_dynticks_qs);
 
1619	}
1620	/* Clear flag to prevent immediate re-entry. */
1621	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1622		raw_spin_lock_irq_rcu_node(rnp);
1623		WRITE_ONCE(rcu_state.gp_flags,
1624			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1625		raw_spin_unlock_irq_rcu_node(rnp);
1626	}
1627}
1628
1629/*
1630 * Loop doing repeated quiescent-state forcing until the grace period ends.
1631 */
1632static noinline_for_stack void rcu_gp_fqs_loop(void)
1633{
1634	bool first_gp_fqs = true;
1635	int gf = 0;
1636	unsigned long j;
1637	int ret;
1638	struct rcu_node *rnp = rcu_get_root();
1639
1640	j = READ_ONCE(jiffies_till_first_fqs);
1641	if (rcu_state.cbovld)
1642		gf = RCU_GP_FLAG_OVLD;
1643	ret = 0;
1644	for (;;) {
1645		if (rcu_state.cbovld) {
1646			j = (j + 2) / 3;
1647			if (j <= 0)
1648				j = 1;
1649		}
1650		if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1651			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1652			/*
1653			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1654			 * update; required for stall checks.
1655			 */
1656			smp_wmb();
1657			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1658				   jiffies + (j ? 3 * j : 2));
1659		}
1660		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1661				       TPS("fqswait"));
1662		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1663		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1664				 rcu_gp_fqs_check_wake(&gf), j);
1665		rcu_gp_torture_wait();
1666		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1667		/* Locking provides needed memory barriers. */
1668		/*
1669		 * Exit the loop if the root rcu_node structure indicates that the grace period
1670		 * has ended, leave the loop.  The rcu_preempt_blocked_readers_cgp(rnp) check
1671		 * is required only for single-node rcu_node trees because readers blocking
1672		 * the current grace period are queued only on leaf rcu_node structures.
1673		 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1674		 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1675		 * the corresponding leaf nodes have passed through their quiescent state.
1676		 */
1677		if (!READ_ONCE(rnp->qsmask) &&
1678		    !rcu_preempt_blocked_readers_cgp(rnp))
1679			break;
1680		/* If time for quiescent-state forcing, do it. */
1681		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1682		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1683			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1684					       TPS("fqsstart"));
1685			rcu_gp_fqs(first_gp_fqs);
1686			gf = 0;
1687			if (first_gp_fqs) {
1688				first_gp_fqs = false;
1689				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1690			}
1691			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1692					       TPS("fqsend"));
1693			cond_resched_tasks_rcu_qs();
1694			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1695			ret = 0; /* Force full wait till next FQS. */
1696			j = READ_ONCE(jiffies_till_next_fqs);
1697		} else {
1698			/* Deal with stray signal. */
1699			cond_resched_tasks_rcu_qs();
1700			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1701			WARN_ON(signal_pending(current));
1702			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1703					       TPS("fqswaitsig"));
1704			ret = 1; /* Keep old FQS timing. */
1705			j = jiffies;
1706			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1707				j = 1;
1708			else
1709				j = rcu_state.jiffies_force_qs - j;
1710			gf = 0;
1711		}
1712	}
 
1713}
1714
1715/*
1716 * Clean up after the old grace period.
1717 */
1718static noinline void rcu_gp_cleanup(void)
1719{
1720	int cpu;
1721	bool needgp = false;
1722	unsigned long gp_duration;
1723	unsigned long new_gp_seq;
1724	bool offloaded;
1725	struct rcu_data *rdp;
1726	struct rcu_node *rnp = rcu_get_root();
1727	struct swait_queue_head *sq;
1728
1729	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1730	raw_spin_lock_irq_rcu_node(rnp);
1731	rcu_state.gp_end = jiffies;
1732	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1733	if (gp_duration > rcu_state.gp_max)
1734		rcu_state.gp_max = gp_duration;
1735
1736	/*
1737	 * We know the grace period is complete, but to everyone else
1738	 * it appears to still be ongoing.  But it is also the case
1739	 * that to everyone else it looks like there is nothing that
1740	 * they can do to advance the grace period.  It is therefore
1741	 * safe for us to drop the lock in order to mark the grace
1742	 * period as completed in all of the rcu_node structures.
1743	 */
1744	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1745	raw_spin_unlock_irq_rcu_node(rnp);
1746
1747	/*
1748	 * Propagate new ->gp_seq value to rcu_node structures so that
1749	 * other CPUs don't have to wait until the start of the next grace
1750	 * period to process their callbacks.  This also avoids some nasty
1751	 * RCU grace-period initialization races by forcing the end of
1752	 * the current grace period to be completely recorded in all of
1753	 * the rcu_node structures before the beginning of the next grace
1754	 * period is recorded in any of the rcu_node structures.
1755	 */
1756	new_gp_seq = rcu_state.gp_seq;
1757	rcu_seq_end(&new_gp_seq);
1758	rcu_for_each_node_breadth_first(rnp) {
1759		raw_spin_lock_irq_rcu_node(rnp);
1760		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1761			dump_blkd_tasks(rnp, 10);
1762		WARN_ON_ONCE(rnp->qsmask);
1763		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1764		if (!rnp->parent)
1765			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1766		rdp = this_cpu_ptr(&rcu_data);
1767		if (rnp == rdp->mynode)
1768			needgp = __note_gp_changes(rnp, rdp) || needgp;
1769		/* smp_mb() provided by prior unlock-lock pair. */
1770		needgp = rcu_future_gp_cleanup(rnp) || needgp;
1771		// Reset overload indication for CPUs no longer overloaded
1772		if (rcu_is_leaf_node(rnp))
1773			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1774				rdp = per_cpu_ptr(&rcu_data, cpu);
1775				check_cb_ovld_locked(rdp, rnp);
1776			}
1777		sq = rcu_nocb_gp_get(rnp);
1778		raw_spin_unlock_irq_rcu_node(rnp);
1779		rcu_nocb_gp_cleanup(sq);
1780		cond_resched_tasks_rcu_qs();
1781		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1782		rcu_gp_slow(gp_cleanup_delay);
1783	}
1784	rnp = rcu_get_root();
1785	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1786
1787	/* Declare grace period done, trace first to use old GP number. */
1788	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1789	rcu_seq_end(&rcu_state.gp_seq);
1790	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1791	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1792	/* Check for GP requests since above loop. */
1793	rdp = this_cpu_ptr(&rcu_data);
1794	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1795		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1796				  TPS("CleanupMore"));
1797		needgp = true;
1798	}
1799	/* Advance CBs to reduce false positives below. */
1800	offloaded = rcu_rdp_is_offloaded(rdp);
1801	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1802
1803		// We get here if a grace period was needed (“needgp”)
1804		// and the above call to rcu_accelerate_cbs() did not set
1805		// the RCU_GP_FLAG_INIT bit in ->gp_state (which records
1806		// the need for another grace period).  The purpose
1807		// of the “offloaded” check is to avoid invoking
1808		// rcu_accelerate_cbs() on an offloaded CPU because we do not
1809		// hold the ->nocb_lock needed to safely access an offloaded
1810		// ->cblist.  We do not want to acquire that lock because
1811		// it can be heavily contended during callback floods.
1812
1813		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1814		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1815		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1816	} else {
1817
1818		// We get here either if there is no need for an
1819		// additional grace period or if rcu_accelerate_cbs() has
1820		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
1821		// So all we need to do is to clear all of the other
1822		// ->gp_flags bits.
1823
1824		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1825	}
1826	raw_spin_unlock_irq_rcu_node(rnp);
1827
1828	// If strict, make all CPUs aware of the end of the old grace period.
1829	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1830		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1831}
1832
1833/*
1834 * Body of kthread that handles grace periods.
1835 */
1836static int __noreturn rcu_gp_kthread(void *unused)
1837{
1838	rcu_bind_gp_kthread();
 
 
 
 
 
 
1839	for (;;) {
1840
1841		/* Handle grace-period start. */
1842		for (;;) {
1843			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
 
1844					       TPS("reqwait"));
1845			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1846			swait_event_idle_exclusive(rcu_state.gp_wq,
1847					 READ_ONCE(rcu_state.gp_flags) &
1848					 RCU_GP_FLAG_INIT);
1849			rcu_gp_torture_wait();
1850			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1851			/* Locking provides needed memory barrier. */
1852			if (rcu_gp_init())
1853				break;
1854			cond_resched_tasks_rcu_qs();
1855			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1856			WARN_ON(signal_pending(current));
1857			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1858					       TPS("reqwaitsig"));
1859		}
1860
1861		/* Handle quiescent-state forcing. */
1862		rcu_gp_fqs_loop();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1863
1864		/* Handle grace-period end. */
1865		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1866		rcu_gp_cleanup();
1867		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1868	}
1869}
1870
 
 
 
 
 
 
 
 
1871/*
1872 * Report a full set of quiescent states to the rcu_state data structure.
1873 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1874 * another grace period is required.  Whether we wake the grace-period
1875 * kthread or it awakens itself for the next round of quiescent-state
1876 * forcing, that kthread will clean up after the just-completed grace
1877 * period.  Note that the caller must hold rnp->lock, which is released
1878 * before return.
1879 */
1880static void rcu_report_qs_rsp(unsigned long flags)
1881	__releases(rcu_get_root()->lock)
1882{
1883	raw_lockdep_assert_held_rcu_node(rcu_get_root());
1884	WARN_ON_ONCE(!rcu_gp_in_progress());
1885	WRITE_ONCE(rcu_state.gp_flags,
1886		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1887	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1888	rcu_gp_kthread_wake();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1889}
1890
1891/*
1892 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1893 * Allows quiescent states for a group of CPUs to be reported at one go
1894 * to the specified rcu_node structure, though all the CPUs in the group
1895 * must be represented by the same rcu_node structure (which need not be a
1896 * leaf rcu_node structure, though it often will be).  The gps parameter
1897 * is the grace-period snapshot, which means that the quiescent states
1898 * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1899 * must be held upon entry, and it is released before return.
1900 *
1901 * As a special case, if mask is zero, the bit-already-cleared check is
1902 * disabled.  This allows propagating quiescent state due to resumed tasks
1903 * during grace-period initialization.
1904 */
1905static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1906			      unsigned long gps, unsigned long flags)
 
1907	__releases(rnp->lock)
1908{
1909	unsigned long oldmask = 0;
1910	struct rcu_node *rnp_c;
1911
1912	raw_lockdep_assert_held_rcu_node(rnp);
1913
1914	/* Walk up the rcu_node hierarchy. */
1915	for (;;) {
1916		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1917
1918			/*
1919			 * Our bit has already been cleared, or the
1920			 * relevant grace period is already over, so done.
1921			 */
1922			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1923			return;
1924		}
1925		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1926		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1927			     rcu_preempt_blocked_readers_cgp(rnp));
1928		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1929		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1930						 mask, rnp->qsmask, rnp->level,
1931						 rnp->grplo, rnp->grphi,
1932						 !!rnp->gp_tasks);
1933		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1934
1935			/* Other bits still set at this level, so done. */
1936			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1937			return;
1938		}
1939		rnp->completedqs = rnp->gp_seq;
1940		mask = rnp->grpmask;
1941		if (rnp->parent == NULL) {
1942
1943			/* No more levels.  Exit loop holding root lock. */
1944
1945			break;
1946		}
1947		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1948		rnp_c = rnp;
1949		rnp = rnp->parent;
1950		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1951		oldmask = READ_ONCE(rnp_c->qsmask);
 
1952	}
1953
1954	/*
1955	 * Get here if we are the last CPU to pass through a quiescent
1956	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1957	 * to clean up and start the next grace period if one is needed.
1958	 */
1959	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1960}
1961
1962/*
1963 * Record a quiescent state for all tasks that were previously queued
1964 * on the specified rcu_node structure and that were blocking the current
1965 * RCU grace period.  The caller must hold the corresponding rnp->lock with
1966 * irqs disabled, and this lock is released upon return, but irqs remain
1967 * disabled.
1968 */
1969static void __maybe_unused
1970rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1971	__releases(rnp->lock)
1972{
1973	unsigned long gps;
1974	unsigned long mask;
1975	struct rcu_node *rnp_p;
1976
1977	raw_lockdep_assert_held_rcu_node(rnp);
1978	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1979	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1980	    rnp->qsmask != 0) {
1981		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1982		return;  /* Still need more quiescent states! */
1983	}
1984
1985	rnp->completedqs = rnp->gp_seq;
1986	rnp_p = rnp->parent;
1987	if (rnp_p == NULL) {
1988		/*
1989		 * Only one rcu_node structure in the tree, so don't
1990		 * try to report up to its nonexistent parent!
1991		 */
1992		rcu_report_qs_rsp(flags);
1993		return;
1994	}
1995
1996	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1997	gps = rnp->gp_seq;
1998	mask = rnp->grpmask;
1999	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2000	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2001	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2002}
2003
2004/*
2005 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2006 * structure.  This must be called from the specified CPU.
 
 
 
 
 
2007 */
2008static void
2009rcu_report_qs_rdp(struct rcu_data *rdp)
2010{
2011	unsigned long flags;
2012	unsigned long mask;
2013	bool needacc = false;
2014	struct rcu_node *rnp;
2015
2016	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2017	rnp = rdp->mynode;
2018	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2019	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2020	    rdp->gpwrap) {
 
2021
2022		/*
2023		 * The grace period in which this quiescent state was
2024		 * recorded has ended, so don't report it upwards.
2025		 * We will instead need a new quiescent state that lies
2026		 * within the current grace period.
2027		 */
2028		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2029		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2030		return;
2031	}
2032	mask = rdp->grpmask;
2033	rdp->core_needs_qs = false;
2034	if ((rnp->qsmask & mask) == 0) {
2035		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2036	} else {
 
 
2037		/*
2038		 * This GP can't end until cpu checks in, so all of our
2039		 * callbacks can be processed during the next GP.
2040		 *
2041		 * NOCB kthreads have their own way to deal with that...
2042		 */
2043		if (!rcu_rdp_is_offloaded(rdp)) {
2044			/*
2045			 * The current GP has not yet ended, so it
2046			 * should not be possible for rcu_accelerate_cbs()
2047			 * to return true.  So complain, but don't awaken.
2048			 */
2049			WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2050		} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2051			/*
2052			 * ...but NOCB kthreads may miss or delay callbacks acceleration
2053			 * if in the middle of a (de-)offloading process.
2054			 */
2055			needacc = true;
2056		}
2057
2058		rcu_disable_urgency_upon_qs(rdp);
2059		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2060		/* ^^^ Released rnp->lock */
2061
2062		if (needacc) {
2063			rcu_nocb_lock_irqsave(rdp, flags);
2064			rcu_accelerate_cbs_unlocked(rnp, rdp);
2065			rcu_nocb_unlock_irqrestore(rdp, flags);
2066		}
2067	}
2068}
2069
2070/*
2071 * Check to see if there is a new grace period of which this CPU
2072 * is not yet aware, and if so, set up local rcu_data state for it.
2073 * Otherwise, see if this CPU has just passed through its first
2074 * quiescent state for this grace period, and record that fact if so.
2075 */
2076static void
2077rcu_check_quiescent_state(struct rcu_data *rdp)
2078{
2079	/* Check for grace-period ends and beginnings. */
2080	note_gp_changes(rdp);
2081
2082	/*
2083	 * Does this CPU still need to do its part for current grace period?
2084	 * If no, return and let the other CPUs do their part as well.
2085	 */
2086	if (!rdp->core_needs_qs)
2087		return;
2088
2089	/*
2090	 * Was there a quiescent state since the beginning of the grace
2091	 * period? If no, then exit and wait for the next call.
2092	 */
2093	if (rdp->cpu_no_qs.b.norm)
2094		return;
2095
2096	/*
2097	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2098	 * judge of that).
2099	 */
2100	rcu_report_qs_rdp(rdp);
2101}
2102
2103/* Return true if callback-invocation time limit exceeded. */
2104static bool rcu_do_batch_check_time(long count, long tlimit,
2105				    bool jlimit_check, unsigned long jlimit)
2106{
2107	// Invoke local_clock() only once per 32 consecutive callbacks.
2108	return unlikely(tlimit) &&
2109	       (!likely(count & 31) ||
2110		(IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2111		 jlimit_check && time_after(jiffies, jlimit))) &&
2112	       local_clock() >= tlimit;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2113}
2114
2115/*
2116 * Invoke any RCU callbacks that have made it to the end of their grace
2117 * period.  Throttle as specified by rdp->blimit.
2118 */
2119static void rcu_do_batch(struct rcu_data *rdp)
2120{
2121	long bl;
2122	long count = 0;
2123	int div;
2124	bool __maybe_unused empty;
2125	unsigned long flags;
2126	unsigned long jlimit;
2127	bool jlimit_check = false;
2128	long pending;
2129	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2130	struct rcu_head *rhp;
2131	long tlimit = 0;
2132
2133	/* If no callbacks are ready, just return. */
2134	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2135		trace_rcu_batch_start(rcu_state.name,
2136				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2137		trace_rcu_batch_end(rcu_state.name, 0,
2138				    !rcu_segcblist_empty(&rdp->cblist),
2139				    need_resched(), is_idle_task(current),
2140				    rcu_is_callbacks_kthread(rdp));
2141		return;
2142	}
 
 
 
 
 
 
 
 
2143
2144	/*
2145	 * Extract the list of ready callbacks, disabling IRQs to prevent
2146	 * races with call_rcu() from interrupt handlers.  Leave the
2147	 * callback counts, as rcu_barrier() needs to be conservative.
2148	 */
2149	rcu_nocb_lock_irqsave(rdp, flags);
2150	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2151	pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2152	div = READ_ONCE(rcu_divisor);
2153	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2154	bl = max(rdp->blimit, pending >> div);
2155	if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2156	    (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2157		const long npj = NSEC_PER_SEC / HZ;
2158		long rrn = READ_ONCE(rcu_resched_ns);
2159
2160		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2161		tlimit = local_clock() + rrn;
2162		jlimit = jiffies + (rrn + npj + 1) / npj;
2163		jlimit_check = true;
2164	}
2165	trace_rcu_batch_start(rcu_state.name,
2166			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2167	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2168	if (rcu_rdp_is_offloaded(rdp))
2169		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2170
2171	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2172	rcu_nocb_unlock_irqrestore(rdp, flags);
 
 
 
 
 
 
 
 
2173
2174	/* Invoke callbacks. */
2175	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2176	rhp = rcu_cblist_dequeue(&rcl);
 
 
 
 
 
2177
2178	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2179		rcu_callback_t f;
 
 
 
 
 
 
2180
2181		count++;
2182		debug_rcu_head_unqueue(rhp);
 
 
 
2183
2184		rcu_lock_acquire(&rcu_callback_map);
2185		trace_rcu_invoke_callback(rcu_state.name, rhp);
 
 
 
 
 
 
 
 
 
 
 
 
2186
2187		f = rhp->func;
2188		debug_rcu_head_callback(rhp);
2189		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2190		f(rhp);
2191
2192		rcu_lock_release(&rcu_callback_map);
2193
2194		/*
2195		 * Stop only if limit reached and CPU has something to do.
2196		 */
2197		if (in_serving_softirq()) {
2198			if (count >= bl && (need_resched() || !is_idle_task(current)))
2199				break;
2200			/*
2201			 * Make sure we don't spend too much time here and deprive other
2202			 * softirq vectors of CPU cycles.
2203			 */
2204			if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2205				break;
2206		} else {
2207			// In rcuc/rcuoc context, so no worries about
2208			// depriving other softirq vectors of CPU cycles.
2209			local_bh_enable();
2210			lockdep_assert_irqs_enabled();
2211			cond_resched_tasks_rcu_qs();
2212			lockdep_assert_irqs_enabled();
2213			local_bh_disable();
2214			// But rcuc kthreads can delay quiescent-state
2215			// reporting, so check time limits for them.
2216			if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2217			    rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2218				rdp->rcu_cpu_has_work = 1;
2219				break;
2220			}
2221		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2222	}
2223
2224	rcu_nocb_lock_irqsave(rdp, flags);
2225	rdp->n_cbs_invoked += count;
2226	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2227			    is_idle_task(current), rcu_is_callbacks_kthread(rdp));
 
 
 
 
 
 
 
 
 
 
 
 
2228
2229	/* Update counts and requeue any remaining callbacks. */
2230	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2231	rcu_segcblist_add_len(&rdp->cblist, -count);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2232
2233	/* Reinstate batch limit if we have worked down the excess. */
2234	count = rcu_segcblist_n_cbs(&rdp->cblist);
2235	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2236		rdp->blimit = blimit;
2237
2238	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2239	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2240		rdp->qlen_last_fqs_check = 0;
2241		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2242	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2243		rdp->qlen_last_fqs_check = count;
2244
2245	/*
2246	 * The following usually indicates a double call_rcu().  To track
2247	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2248	 */
2249	empty = rcu_segcblist_empty(&rdp->cblist);
2250	WARN_ON_ONCE(count == 0 && !empty);
2251	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2252		     count != 0 && empty);
2253	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2254	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2255
2256	rcu_nocb_unlock_irqrestore(rdp, flags);
2257
2258	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
 
 
2259}
2260
2261/*
2262 * This function is invoked from each scheduling-clock interrupt,
2263 * and checks to see if this CPU is in a non-context-switch quiescent
2264 * state, for example, user mode or idle loop.  It also schedules RCU
2265 * core processing.  If the current grace period has gone on too long,
2266 * it will ask the scheduler to manufacture a context switch for the sole
2267 * purpose of providing the needed quiescent state.
 
2268 */
2269void rcu_sched_clock_irq(int user)
2270{
2271	unsigned long j;
2272
2273	if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2274		j = jiffies;
2275		WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2276		__this_cpu_write(rcu_data.last_sched_clock, j);
2277	}
2278	trace_rcu_utilization(TPS("Start scheduler-tick"));
2279	lockdep_assert_irqs_disabled();
2280	raw_cpu_inc(rcu_data.ticks_this_gp);
2281	/* The load-acquire pairs with the store-release setting to true. */
2282	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2283		/* Idle and userspace execution already are quiescent states. */
2284		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2285			set_tsk_need_resched(current);
2286			set_preempt_need_resched();
2287		}
2288		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2289	}
2290	rcu_flavor_sched_clock_irq(user);
2291	if (rcu_pending(user))
2292		invoke_rcu_core();
2293	if (user || rcu_is_cpu_rrupt_from_idle())
2294		rcu_note_voluntary_context_switch(current);
2295	lockdep_assert_irqs_disabled();
2296
2297	trace_rcu_utilization(TPS("End scheduler-tick"));
2298}
2299
2300/*
2301 * Scan the leaf rcu_node structures.  For each structure on which all
2302 * CPUs have reported a quiescent state and on which there are tasks
2303 * blocking the current grace period, initiate RCU priority boosting.
2304 * Otherwise, invoke the specified function to check dyntick state for
2305 * each CPU that has not yet reported a quiescent state.
2306 */
2307static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 
 
 
2308{
 
2309	int cpu;
2310	unsigned long flags;
 
2311	struct rcu_node *rnp;
2312
2313	rcu_state.cbovld = rcu_state.cbovldnext;
2314	rcu_state.cbovldnext = false;
2315	rcu_for_each_leaf_node(rnp) {
2316		unsigned long mask = 0;
2317		unsigned long rsmask = 0;
2318
2319		cond_resched_tasks_rcu_qs();
2320		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2321		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2322		if (rnp->qsmask == 0) {
2323			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2324				/*
2325				 * No point in scanning bits because they
2326				 * are all zero.  But we might need to
2327				 * priority-boost blocked readers.
2328				 */
2329				rcu_initiate_boost(rnp, flags);
2330				/* rcu_initiate_boost() releases rnp->lock */
2331				continue;
2332			}
2333			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2334			continue;
2335		}
2336		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2337			struct rcu_data *rdp;
2338			int ret;
2339
2340			rdp = per_cpu_ptr(&rcu_data, cpu);
2341			ret = f(rdp);
2342			if (ret > 0) {
2343				mask |= rdp->grpmask;
2344				rcu_disable_urgency_upon_qs(rdp);
2345			}
2346			if (ret < 0)
2347				rsmask |= rdp->grpmask;
2348		}
2349		if (mask != 0) {
2350			/* Idle/offline CPUs, report (releases rnp->lock). */
2351			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2352		} else {
2353			/* Nothing to do here, so just drop the lock. */
2354			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2355		}
2356
2357		for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2358			resched_cpu(cpu);
 
 
 
 
 
 
 
 
 
2359	}
2360}
2361
2362/*
2363 * Force quiescent states on reluctant CPUs, and also detect which
2364 * CPUs are in dyntick-idle mode.
2365 */
2366void rcu_force_quiescent_state(void)
2367{
2368	unsigned long flags;
2369	bool ret;
2370	struct rcu_node *rnp;
2371	struct rcu_node *rnp_old = NULL;
2372
2373	if (!rcu_gp_in_progress())
2374		return;
2375	/* Funnel through hierarchy to reduce memory contention. */
2376	rnp = raw_cpu_read(rcu_data.mynode);
2377	for (; rnp != NULL; rnp = rnp->parent) {
2378		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2379		       !raw_spin_trylock(&rnp->fqslock);
2380		if (rnp_old != NULL)
2381			raw_spin_unlock(&rnp_old->fqslock);
2382		if (ret)
 
2383			return;
 
2384		rnp_old = rnp;
2385	}
2386	/* rnp_old == rcu_get_root(), rnp == NULL. */
2387
2388	/* Reached the root of the rcu_node tree, acquire lock. */
2389	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
 
2390	raw_spin_unlock(&rnp_old->fqslock);
2391	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2392		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 
2393		return;  /* Someone beat us to it. */
2394	}
2395	WRITE_ONCE(rcu_state.gp_flags,
2396		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2397	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2398	rcu_gp_kthread_wake();
2399}
2400EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2401
2402// Workqueue handler for an RCU reader for kernels enforcing struct RCU
2403// grace periods.
2404static void strict_work_handler(struct work_struct *work)
2405{
2406	rcu_read_lock();
2407	rcu_read_unlock();
2408}
2409
2410/* Perform RCU core processing work for the current CPU.  */
2411static __latent_entropy void rcu_core(void)
 
 
 
 
 
2412{
2413	unsigned long flags;
2414	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2415	struct rcu_node *rnp = rdp->mynode;
2416	/*
2417	 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2418	 * Therefore this function can race with concurrent NOCB (de-)offloading
2419	 * on this CPU and the below condition must be considered volatile.
2420	 * However if we race with:
2421	 *
2422	 * _ Offloading:   In the worst case we accelerate or process callbacks
2423	 *                 concurrently with NOCB kthreads. We are guaranteed to
2424	 *                 call rcu_nocb_lock() if that happens.
2425	 *
2426	 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2427	 *                 processing. This is fine because the early stage
2428	 *                 of deoffloading invokes rcu_core() after setting
2429	 *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2430	 *                 what could have been dismissed without the need to wait
2431	 *                 for the next rcu_pending() check in the next jiffy.
2432	 */
2433	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2434
2435	if (cpu_is_offline(smp_processor_id()))
2436		return;
2437	trace_rcu_utilization(TPS("Start RCU core"));
2438	WARN_ON_ONCE(!rdp->beenonline);
2439
2440	/* Report any deferred quiescent states if preemption enabled. */
2441	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2442		rcu_preempt_deferred_qs(current);
2443	} else if (rcu_preempt_need_deferred_qs(current)) {
2444		set_tsk_need_resched(current);
2445		set_preempt_need_resched();
2446	}
2447
2448	/* Update RCU state based on any recent quiescent states. */
2449	rcu_check_quiescent_state(rdp);
2450
2451	/* No grace period and unregistered callbacks? */
2452	if (!rcu_gp_in_progress() &&
2453	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2454		rcu_nocb_lock_irqsave(rdp, flags);
2455		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2456			rcu_accelerate_cbs_unlocked(rnp, rdp);
2457		rcu_nocb_unlock_irqrestore(rdp, flags);
 
2458	}
2459
2460	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2461
2462	/* If there are callbacks ready, invoke them. */
2463	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2464	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
2465		rcu_do_batch(rdp);
2466		/* Re-invoke RCU core processing if there are callbacks remaining. */
2467		if (rcu_segcblist_ready_cbs(&rdp->cblist))
2468			invoke_rcu_core();
2469	}
2470
2471	/* Do any needed deferred wakeups of rcuo kthreads. */
2472	do_nocb_deferred_wakeup(rdp);
2473	trace_rcu_utilization(TPS("End RCU core"));
2474
2475	// If strict GPs, schedule an RCU reader in a clean environment.
2476	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2477		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2478}
2479
2480static void rcu_core_si(struct softirq_action *h)
2481{
2482	rcu_core();
2483}
2484
2485static void rcu_wake_cond(struct task_struct *t, int status)
2486{
2487	/*
2488	 * If the thread is yielding, only wake it when this
2489	 * is invoked from idle
2490	 */
2491	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2492		wake_up_process(t);
2493}
2494
2495static void invoke_rcu_core_kthread(void)
2496{
2497	struct task_struct *t;
2498	unsigned long flags;
2499
2500	local_irq_save(flags);
2501	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2502	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2503	if (t != NULL && t != current)
2504		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2505	local_irq_restore(flags);
2506}
2507
2508/*
2509 * Wake up this CPU's rcuc kthread to do RCU core processing.
2510 */
2511static void invoke_rcu_core(void)
2512{
2513	if (!cpu_online(smp_processor_id()))
2514		return;
2515	if (use_softirq)
2516		raise_softirq(RCU_SOFTIRQ);
2517	else
2518		invoke_rcu_core_kthread();
2519}
2520
2521static void rcu_cpu_kthread_park(unsigned int cpu)
2522{
2523	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2524}
2525
2526static int rcu_cpu_kthread_should_run(unsigned int cpu)
2527{
2528	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
 
 
 
2529}
2530
2531/*
2532 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2533 * the RCU softirq used in configurations of RCU that do not support RCU
2534 * priority boosting.
 
 
2535 */
2536static void rcu_cpu_kthread(unsigned int cpu)
2537{
2538	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2539	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2540	unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2541	int spincnt;
2542
2543	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2544	for (spincnt = 0; spincnt < 10; spincnt++) {
2545		WRITE_ONCE(*j, jiffies);
2546		local_bh_disable();
2547		*statusp = RCU_KTHREAD_RUNNING;
2548		local_irq_disable();
2549		work = *workp;
2550		WRITE_ONCE(*workp, 0);
2551		local_irq_enable();
2552		if (work)
2553			rcu_core();
2554		local_bh_enable();
2555		if (!READ_ONCE(*workp)) {
2556			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2557			*statusp = RCU_KTHREAD_WAITING;
2558			return;
2559		}
2560	}
2561	*statusp = RCU_KTHREAD_YIELDING;
2562	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2563	schedule_timeout_idle(2);
2564	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2565	*statusp = RCU_KTHREAD_WAITING;
2566	WRITE_ONCE(*j, jiffies);
2567}
2568
2569static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2570	.store			= &rcu_data.rcu_cpu_kthread_task,
2571	.thread_should_run	= rcu_cpu_kthread_should_run,
2572	.thread_fn		= rcu_cpu_kthread,
2573	.thread_comm		= "rcuc/%u",
2574	.setup			= rcu_cpu_kthread_setup,
2575	.park			= rcu_cpu_kthread_park,
2576};
2577
2578/*
2579 * Spawn per-CPU RCU core processing kthreads.
2580 */
2581static int __init rcu_spawn_core_kthreads(void)
2582{
2583	int cpu;
2584
2585	for_each_possible_cpu(cpu)
2586		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2587	if (use_softirq)
2588		return 0;
2589	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2590		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2591	return 0;
2592}
2593
2594/*
2595 * Handle any core-RCU processing required by a call_rcu() invocation.
2596 */
2597static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2598			    unsigned long flags)
2599{
2600	/*
2601	 * If called from an extended quiescent state, invoke the RCU
2602	 * core in order to force a re-evaluation of RCU's idleness.
2603	 */
2604	if (!rcu_is_watching())
2605		invoke_rcu_core();
2606
2607	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2608	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2609		return;
2610
2611	/*
2612	 * Force the grace period if too many callbacks or too long waiting.
2613	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2614	 * if some other CPU has recently done so.  Also, don't bother
2615	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2616	 * is the only one waiting for a grace period to complete.
2617	 */
2618	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2619		     rdp->qlen_last_fqs_check + qhimark)) {
2620
2621		/* Are we ignoring a completed grace period? */
2622		note_gp_changes(rdp);
2623
2624		/* Start a new grace period if one not already started. */
2625		if (!rcu_gp_in_progress()) {
2626			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
 
 
 
 
 
2627		} else {
2628			/* Give the grace period a kick. */
2629			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2630			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2631			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2632				rcu_force_quiescent_state();
2633			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2634			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2635		}
2636	}
2637}
2638
2639/*
2640 * RCU callback function to leak a callback.
2641 */
2642static void rcu_leak_callback(struct rcu_head *rhp)
2643{
2644}
2645
2646/*
2647 * Check and if necessary update the leaf rcu_node structure's
2648 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2649 * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2650 * structure's ->lock.
2651 */
2652static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2653{
2654	raw_lockdep_assert_held_rcu_node(rnp);
2655	if (qovld_calc <= 0)
2656		return; // Early boot and wildcard value set.
2657	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2658		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2659	else
2660		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2661}
2662
2663/*
2664 * Check and if necessary update the leaf rcu_node structure's
2665 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2666 * number of queued RCU callbacks.  No locks need be held, but the
2667 * caller must have disabled interrupts.
2668 *
2669 * Note that this function ignores the possibility that there are a lot
2670 * of callbacks all of which have already seen the end of their respective
2671 * grace periods.  This omission is due to the need for no-CBs CPUs to
2672 * be holding ->nocb_lock to do this check, which is too heavy for a
2673 * common-case operation.
2674 */
2675static void check_cb_ovld(struct rcu_data *rdp)
2676{
2677	struct rcu_node *const rnp = rdp->mynode;
2678
2679	if (qovld_calc <= 0 ||
2680	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2681	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2682		return; // Early boot wildcard value or already set correctly.
2683	raw_spin_lock_rcu_node(rnp);
2684	check_cb_ovld_locked(rdp, rnp);
2685	raw_spin_unlock_rcu_node(rnp);
2686}
2687
2688static void
2689__call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 
2690{
2691	static atomic_t doublefrees;
2692	unsigned long flags;
2693	bool lazy;
2694	struct rcu_data *rdp;
2695	bool was_alldone;
2696
2697	/* Misaligned rcu_head! */
2698	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2699
 
2700	if (debug_rcu_head_queue(head)) {
2701		/*
2702		 * Probable double call_rcu(), so leak the callback.
2703		 * Use rcu:rcu_callback trace event to find the previous
2704		 * time callback was passed to call_rcu().
2705		 */
2706		if (atomic_inc_return(&doublefrees) < 4) {
2707			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2708			mem_dump_obj(head);
2709		}
2710		WRITE_ONCE(head->func, rcu_leak_callback);
2711		return;
2712	}
2713	head->func = func;
2714	head->next = NULL;
2715	kasan_record_aux_stack_noalloc(head);
2716	local_irq_save(flags);
2717	rdp = this_cpu_ptr(&rcu_data);
2718	lazy = lazy_in && !rcu_async_should_hurry();
2719
2720	/* Add the callback to our list. */
2721	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2722		// This can trigger due to call_rcu() from offline CPU:
2723		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2724		WARN_ON_ONCE(!rcu_is_watching());
2725		// Very early boot, before rcu_init().  Initialize if needed
2726		// and then drop through to queue the callback.
2727		if (rcu_segcblist_empty(&rdp->cblist))
2728			rcu_segcblist_init(&rdp->cblist);
2729	}
2730
2731	check_cb_ovld(rdp);
2732	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2733		return; // Enqueued onto ->nocb_bypass, so just leave.
2734	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2735	rcu_segcblist_enqueue(&rdp->cblist, head);
2736	if (__is_kvfree_rcu_offset((unsigned long)func))
2737		trace_rcu_kvfree_callback(rcu_state.name, head,
2738					 (unsigned long)func,
2739					 rcu_segcblist_n_cbs(&rdp->cblist));
2740	else
2741		trace_rcu_callback(rcu_state.name, head,
2742				   rcu_segcblist_n_cbs(&rdp->cblist));
2743
2744	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2745
2746	/* Go handle any RCU core processing required. */
2747	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2748		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2749	} else {
2750		__call_rcu_core(rdp, head, flags);
2751		local_irq_restore(flags);
2752	}
2753}
2754
2755#ifdef CONFIG_RCU_LAZY
2756/**
2757 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2758 * flush all lazy callbacks (including the new one) to the main ->cblist while
2759 * doing so.
2760 *
2761 * @head: structure to be used for queueing the RCU updates.
2762 * @func: actual callback function to be invoked after the grace period
2763 *
2764 * The callback function will be invoked some time after a full grace
2765 * period elapses, in other words after all pre-existing RCU read-side
2766 * critical sections have completed.
2767 *
2768 * Use this API instead of call_rcu() if you don't want the callback to be
2769 * invoked after very long periods of time, which can happen on systems without
2770 * memory pressure and on systems which are lightly loaded or mostly idle.
2771 * This function will cause callbacks to be invoked sooner than later at the
2772 * expense of extra power. Other than that, this function is identical to, and
2773 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2774 * ordering and other functionality.
2775 */
2776void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2777{
2778	__call_rcu_common(head, func, false);
2779}
2780EXPORT_SYMBOL_GPL(call_rcu_hurry);
2781#endif
2782
2783/**
2784 * call_rcu() - Queue an RCU callback for invocation after a grace period.
2785 * By default the callbacks are 'lazy' and are kept hidden from the main
2786 * ->cblist to prevent starting of grace periods too soon.
2787 * If you desire grace periods to start very soon, use call_rcu_hurry().
2788 *
2789 * @head: structure to be used for queueing the RCU updates.
2790 * @func: actual callback function to be invoked after the grace period
2791 *
2792 * The callback function will be invoked some time after a full grace
2793 * period elapses, in other words after all pre-existing RCU read-side
2794 * critical sections have completed.  However, the callback function
2795 * might well execute concurrently with RCU read-side critical sections
2796 * that started after call_rcu() was invoked.
2797 *
2798 * RCU read-side critical sections are delimited by rcu_read_lock()
2799 * and rcu_read_unlock(), and may be nested.  In addition, but only in
2800 * v5.0 and later, regions of code across which interrupts, preemption,
2801 * or softirqs have been disabled also serve as RCU read-side critical
2802 * sections.  This includes hardware interrupt handlers, softirq handlers,
2803 * and NMI handlers.
2804 *
2805 * Note that all CPUs must agree that the grace period extended beyond
2806 * all pre-existing RCU read-side critical section.  On systems with more
2807 * than one CPU, this means that when "func()" is invoked, each CPU is
2808 * guaranteed to have executed a full memory barrier since the end of its
2809 * last RCU read-side critical section whose beginning preceded the call
2810 * to call_rcu().  It also means that each CPU executing an RCU read-side
2811 * critical section that continues beyond the start of "func()" must have
2812 * executed a memory barrier after the call_rcu() but before the beginning
2813 * of that RCU read-side critical section.  Note that these guarantees
2814 * include CPUs that are offline, idle, or executing in user mode, as
2815 * well as CPUs that are executing in the kernel.
2816 *
2817 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2818 * resulting RCU callback function "func()", then both CPU A and CPU B are
2819 * guaranteed to execute a full memory barrier during the time interval
2820 * between the call to call_rcu() and the invocation of "func()" -- even
2821 * if CPU A and CPU B are the same CPU (but again only if the system has
2822 * more than one CPU).
2823 *
2824 * Implementation of these memory-ordering guarantees is described here:
2825 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2826 */
2827void call_rcu(struct rcu_head *head, rcu_callback_t func)
2828{
2829	__call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2830}
2831EXPORT_SYMBOL_GPL(call_rcu);
2832
2833/* Maximum number of jiffies to wait before draining a batch. */
2834#define KFREE_DRAIN_JIFFIES (5 * HZ)
2835#define KFREE_N_BATCHES 2
2836#define FREE_N_CHANNELS 2
2837
2838/**
2839 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2840 * @list: List node. All blocks are linked between each other
2841 * @gp_snap: Snapshot of RCU state for objects placed to this bulk
2842 * @nr_records: Number of active pointers in the array
2843 * @records: Array of the kvfree_rcu() pointers
2844 */
2845struct kvfree_rcu_bulk_data {
2846	struct list_head list;
2847	struct rcu_gp_oldstate gp_snap;
2848	unsigned long nr_records;
2849	void *records[];
2850};
2851
2852/*
2853 * This macro defines how many entries the "records" array
2854 * will contain. It is based on the fact that the size of
2855 * kvfree_rcu_bulk_data structure becomes exactly one page.
2856 */
2857#define KVFREE_BULK_MAX_ENTR \
2858	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
2859
2860/**
2861 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2862 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2863 * @head_free: List of kfree_rcu() objects waiting for a grace period
2864 * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees.
2865 * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2866 * @krcp: Pointer to @kfree_rcu_cpu structure
2867 */
2868
2869struct kfree_rcu_cpu_work {
2870	struct rcu_work rcu_work;
2871	struct rcu_head *head_free;
2872	struct rcu_gp_oldstate head_free_gp_snap;
2873	struct list_head bulk_head_free[FREE_N_CHANNELS];
2874	struct kfree_rcu_cpu *krcp;
2875};
2876
2877/**
2878 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2879 * @head: List of kfree_rcu() objects not yet waiting for a grace period
2880 * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
2881 * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2882 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2883 * @lock: Synchronize access to this structure
2884 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2885 * @initialized: The @rcu_work fields have been initialized
2886 * @head_count: Number of objects in rcu_head singular list
2887 * @bulk_count: Number of objects in bulk-list
2888 * @bkvcache:
2889 *	A simple cache list that contains objects for reuse purpose.
2890 *	In order to save some per-cpu space the list is singular.
2891 *	Even though it is lockless an access has to be protected by the
2892 *	per-cpu lock.
2893 * @page_cache_work: A work to refill the cache when it is empty
2894 * @backoff_page_cache_fill: Delay cache refills
2895 * @work_in_progress: Indicates that page_cache_work is running
2896 * @hrtimer: A hrtimer for scheduling a page_cache_work
2897 * @nr_bkv_objs: number of allocated objects at @bkvcache.
2898 *
2899 * This is a per-CPU structure.  The reason that it is not included in
2900 * the rcu_data structure is to permit this code to be extracted from
2901 * the RCU files.  Such extraction could allow further optimization of
2902 * the interactions with the slab allocators.
2903 */
2904struct kfree_rcu_cpu {
2905	// Objects queued on a linked list
2906	// through their rcu_head structures.
2907	struct rcu_head *head;
2908	unsigned long head_gp_snap;
2909	atomic_t head_count;
2910
2911	// Objects queued on a bulk-list.
2912	struct list_head bulk_head[FREE_N_CHANNELS];
2913	atomic_t bulk_count[FREE_N_CHANNELS];
2914
2915	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2916	raw_spinlock_t lock;
2917	struct delayed_work monitor_work;
2918	bool initialized;
2919
2920	struct delayed_work page_cache_work;
2921	atomic_t backoff_page_cache_fill;
2922	atomic_t work_in_progress;
2923	struct hrtimer hrtimer;
2924
2925	struct llist_head bkvcache;
2926	int nr_bkv_objs;
2927};
2928
2929static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2930	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2931};
2932
2933static __always_inline void
2934debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2935{
2936#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2937	int i;
2938
2939	for (i = 0; i < bhead->nr_records; i++)
2940		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2941#endif
2942}
2943
2944static inline struct kfree_rcu_cpu *
2945krc_this_cpu_lock(unsigned long *flags)
2946{
2947	struct kfree_rcu_cpu *krcp;
2948
2949	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
2950	krcp = this_cpu_ptr(&krc);
2951	raw_spin_lock(&krcp->lock);
2952
2953	return krcp;
2954}
2955
2956static inline void
2957krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2958{
2959	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2960}
2961
2962static inline struct kvfree_rcu_bulk_data *
2963get_cached_bnode(struct kfree_rcu_cpu *krcp)
2964{
2965	if (!krcp->nr_bkv_objs)
2966		return NULL;
2967
2968	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2969	return (struct kvfree_rcu_bulk_data *)
2970		llist_del_first(&krcp->bkvcache);
2971}
2972
2973static inline bool
2974put_cached_bnode(struct kfree_rcu_cpu *krcp,
2975	struct kvfree_rcu_bulk_data *bnode)
2976{
2977	// Check the limit.
2978	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
2979		return false;
2980
2981	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
2982	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
2983	return true;
2984}
2985
2986static int
2987drain_page_cache(struct kfree_rcu_cpu *krcp)
2988{
2989	unsigned long flags;
2990	struct llist_node *page_list, *pos, *n;
2991	int freed = 0;
2992
2993	if (!rcu_min_cached_objs)
2994		return 0;
2995
2996	raw_spin_lock_irqsave(&krcp->lock, flags);
2997	page_list = llist_del_all(&krcp->bkvcache);
2998	WRITE_ONCE(krcp->nr_bkv_objs, 0);
2999	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3000
3001	llist_for_each_safe(pos, n, page_list) {
3002		free_page((unsigned long)pos);
3003		freed++;
3004	}
3005
3006	return freed;
3007}
3008
3009static void
3010kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
3011	struct kvfree_rcu_bulk_data *bnode, int idx)
3012{
3013	unsigned long flags;
3014	int i;
3015
3016	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
3017		debug_rcu_bhead_unqueue(bnode);
3018		rcu_lock_acquire(&rcu_callback_map);
3019		if (idx == 0) { // kmalloc() / kfree().
3020			trace_rcu_invoke_kfree_bulk_callback(
3021				rcu_state.name, bnode->nr_records,
3022				bnode->records);
3023
3024			kfree_bulk(bnode->nr_records, bnode->records);
3025		} else { // vmalloc() / vfree().
3026			for (i = 0; i < bnode->nr_records; i++) {
3027				trace_rcu_invoke_kvfree_callback(
3028					rcu_state.name, bnode->records[i], 0);
3029
3030				vfree(bnode->records[i]);
3031			}
3032		}
3033		rcu_lock_release(&rcu_callback_map);
3034	}
3035
3036	raw_spin_lock_irqsave(&krcp->lock, flags);
3037	if (put_cached_bnode(krcp, bnode))
3038		bnode = NULL;
3039	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3040
3041	if (bnode)
3042		free_page((unsigned long) bnode);
3043
3044	cond_resched_tasks_rcu_qs();
3045}
3046
3047static void
3048kvfree_rcu_list(struct rcu_head *head)
3049{
3050	struct rcu_head *next;
3051
3052	for (; head; head = next) {
3053		void *ptr = (void *) head->func;
3054		unsigned long offset = (void *) head - ptr;
3055
3056		next = head->next;
3057		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3058		rcu_lock_acquire(&rcu_callback_map);
3059		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3060
3061		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3062			kvfree(ptr);
3063
3064		rcu_lock_release(&rcu_callback_map);
3065		cond_resched_tasks_rcu_qs();
3066	}
3067}
3068
3069/*
3070 * This function is invoked in workqueue context after a grace period.
3071 * It frees all the objects queued on ->bulk_head_free or ->head_free.
3072 */
3073static void kfree_rcu_work(struct work_struct *work)
3074{
3075	unsigned long flags;
3076	struct kvfree_rcu_bulk_data *bnode, *n;
3077	struct list_head bulk_head[FREE_N_CHANNELS];
3078	struct rcu_head *head;
3079	struct kfree_rcu_cpu *krcp;
3080	struct kfree_rcu_cpu_work *krwp;
3081	struct rcu_gp_oldstate head_gp_snap;
3082	int i;
3083
3084	krwp = container_of(to_rcu_work(work),
3085		struct kfree_rcu_cpu_work, rcu_work);
3086	krcp = krwp->krcp;
3087
3088	raw_spin_lock_irqsave(&krcp->lock, flags);
3089	// Channels 1 and 2.
3090	for (i = 0; i < FREE_N_CHANNELS; i++)
3091		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
3092
3093	// Channel 3.
3094	head = krwp->head_free;
3095	krwp->head_free = NULL;
3096	head_gp_snap = krwp->head_free_gp_snap;
3097	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3098
3099	// Handle the first two channels.
3100	for (i = 0; i < FREE_N_CHANNELS; i++) {
3101		// Start from the tail page, so a GP is likely passed for it.
3102		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
3103			kvfree_rcu_bulk(krcp, bnode, i);
3104	}
3105
3106	/*
3107	 * This is used when the "bulk" path can not be used for the
3108	 * double-argument of kvfree_rcu().  This happens when the
3109	 * page-cache is empty, which means that objects are instead
3110	 * queued on a linked list through their rcu_head structures.
3111	 * This list is named "Channel 3".
3112	 */
3113	if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap)))
3114		kvfree_rcu_list(head);
3115}
3116
3117static bool
3118need_offload_krc(struct kfree_rcu_cpu *krcp)
3119{
3120	int i;
3121
3122	for (i = 0; i < FREE_N_CHANNELS; i++)
3123		if (!list_empty(&krcp->bulk_head[i]))
3124			return true;
3125
3126	return !!READ_ONCE(krcp->head);
3127}
3128
3129static bool
3130need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3131{
3132	int i;
3133
3134	for (i = 0; i < FREE_N_CHANNELS; i++)
3135		if (!list_empty(&krwp->bulk_head_free[i]))
3136			return true;
3137
3138	return !!krwp->head_free;
3139}
3140
3141static int krc_count(struct kfree_rcu_cpu *krcp)
3142{
3143	int sum = atomic_read(&krcp->head_count);
3144	int i;
3145
3146	for (i = 0; i < FREE_N_CHANNELS; i++)
3147		sum += atomic_read(&krcp->bulk_count[i]);
3148
3149	return sum;
3150}
3151
3152static void
3153schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3154{
3155	long delay, delay_left;
3156
3157	delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3158	if (delayed_work_pending(&krcp->monitor_work)) {
3159		delay_left = krcp->monitor_work.timer.expires - jiffies;
3160		if (delay < delay_left)
3161			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
 
3162		return;
3163	}
3164	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3165}
3166
3167static void
3168kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
3169{
3170	struct list_head bulk_ready[FREE_N_CHANNELS];
3171	struct kvfree_rcu_bulk_data *bnode, *n;
3172	struct rcu_head *head_ready = NULL;
3173	unsigned long flags;
3174	int i;
3175
3176	raw_spin_lock_irqsave(&krcp->lock, flags);
3177	for (i = 0; i < FREE_N_CHANNELS; i++) {
3178		INIT_LIST_HEAD(&bulk_ready[i]);
3179
3180		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
3181			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
3182				break;
3183
3184			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
3185			list_move(&bnode->list, &bulk_ready[i]);
3186		}
3187	}
3188
3189	if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
3190		head_ready = krcp->head;
3191		atomic_set(&krcp->head_count, 0);
3192		WRITE_ONCE(krcp->head, NULL);
3193	}
3194	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3195
3196	for (i = 0; i < FREE_N_CHANNELS; i++) {
3197		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
3198			kvfree_rcu_bulk(krcp, bnode, i);
3199	}
3200
3201	if (head_ready)
3202		kvfree_rcu_list(head_ready);
 
3203}
3204
3205/*
3206 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3207 */
3208static void kfree_rcu_monitor(struct work_struct *work)
3209{
3210	struct kfree_rcu_cpu *krcp = container_of(work,
3211		struct kfree_rcu_cpu, monitor_work.work);
3212	unsigned long flags;
3213	int i, j;
3214
3215	// Drain ready for reclaim.
3216	kvfree_rcu_drain_ready(krcp);
3217
3218	raw_spin_lock_irqsave(&krcp->lock, flags);
3219
3220	// Attempt to start a new batch.
3221	for (i = 0; i < KFREE_N_BATCHES; i++) {
3222		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3223
3224		// Try to detach bulk_head or head and attach it, only when
3225		// all channels are free.  Any channel is not free means at krwp
3226		// there is on-going rcu work to handle krwp's free business.
3227		if (need_wait_for_krwp_work(krwp))
3228			continue;
3229
3230		// kvfree_rcu_drain_ready() might handle this krcp, if so give up.
3231		if (need_offload_krc(krcp)) {
3232			// Channel 1 corresponds to the SLAB-pointer bulk path.
3233			// Channel 2 corresponds to vmalloc-pointer bulk path.
3234			for (j = 0; j < FREE_N_CHANNELS; j++) {
3235				if (list_empty(&krwp->bulk_head_free[j])) {
3236					atomic_set(&krcp->bulk_count[j], 0);
3237					list_replace_init(&krcp->bulk_head[j],
3238						&krwp->bulk_head_free[j]);
3239				}
3240			}
3241
3242			// Channel 3 corresponds to both SLAB and vmalloc
3243			// objects queued on the linked list.
3244			if (!krwp->head_free) {
3245				krwp->head_free = krcp->head;
3246				get_state_synchronize_rcu_full(&krwp->head_free_gp_snap);
3247				atomic_set(&krcp->head_count, 0);
3248				WRITE_ONCE(krcp->head, NULL);
3249			}
3250
3251			// One work is per one batch, so there are three
3252			// "free channels", the batch can handle. It can
3253			// be that the work is in the pending state when
3254			// channels have been detached following by each
3255			// other.
3256			queue_rcu_work(system_wq, &krwp->rcu_work);
3257		}
3258	}
3259
3260	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3261
3262	// If there is nothing to detach, it means that our job is
3263	// successfully done here. In case of having at least one
3264	// of the channels that is still busy we should rearm the
3265	// work to repeat an attempt. Because previous batches are
3266	// still in progress.
3267	if (need_offload_krc(krcp))
3268		schedule_delayed_monitor_work(krcp);
3269}
3270
3271static enum hrtimer_restart
3272schedule_page_work_fn(struct hrtimer *t)
3273{
3274	struct kfree_rcu_cpu *krcp =
3275		container_of(t, struct kfree_rcu_cpu, hrtimer);
3276
3277	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3278	return HRTIMER_NORESTART;
3279}
3280
3281static void fill_page_cache_func(struct work_struct *work)
3282{
3283	struct kvfree_rcu_bulk_data *bnode;
3284	struct kfree_rcu_cpu *krcp =
3285		container_of(work, struct kfree_rcu_cpu,
3286			page_cache_work.work);
3287	unsigned long flags;
3288	int nr_pages;
3289	bool pushed;
3290	int i;
3291
3292	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3293		1 : rcu_min_cached_objs;
3294
3295	for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
3296		bnode = (struct kvfree_rcu_bulk_data *)
3297			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3298
3299		if (!bnode)
3300			break;
3301
3302		raw_spin_lock_irqsave(&krcp->lock, flags);
3303		pushed = put_cached_bnode(krcp, bnode);
3304		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3305
3306		if (!pushed) {
3307			free_page((unsigned long) bnode);
3308			break;
3309		}
3310	}
3311
3312	atomic_set(&krcp->work_in_progress, 0);
3313	atomic_set(&krcp->backoff_page_cache_fill, 0);
3314}
3315
3316static void
3317run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3318{
3319	// If cache disabled, bail out.
3320	if (!rcu_min_cached_objs)
3321		return;
3322
3323	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3324			!atomic_xchg(&krcp->work_in_progress, 1)) {
3325		if (atomic_read(&krcp->backoff_page_cache_fill)) {
3326			queue_delayed_work(system_wq,
3327				&krcp->page_cache_work,
3328					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3329		} else {
3330			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3331			krcp->hrtimer.function = schedule_page_work_fn;
3332			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3333		}
3334	}
3335}
3336
3337// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3338// state specified by flags.  If can_alloc is true, the caller must
3339// be schedulable and not be holding any locks or mutexes that might be
3340// acquired by the memory allocator or anything that it might invoke.
3341// Returns true if ptr was successfully recorded, else the caller must
3342// use a fallback.
3343static inline bool
3344add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3345	unsigned long *flags, void *ptr, bool can_alloc)
3346{
3347	struct kvfree_rcu_bulk_data *bnode;
3348	int idx;
3349
3350	*krcp = krc_this_cpu_lock(flags);
3351	if (unlikely(!(*krcp)->initialized))
3352		return false;
3353
3354	idx = !!is_vmalloc_addr(ptr);
3355	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
3356		struct kvfree_rcu_bulk_data, list);
3357
3358	/* Check if a new block is required. */
3359	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
3360		bnode = get_cached_bnode(*krcp);
3361		if (!bnode && can_alloc) {
3362			krc_this_cpu_unlock(*krcp, *flags);
3363
3364			// __GFP_NORETRY - allows a light-weight direct reclaim
3365			// what is OK from minimizing of fallback hitting point of
3366			// view. Apart of that it forbids any OOM invoking what is
3367			// also beneficial since we are about to release memory soon.
3368			//
3369			// __GFP_NOMEMALLOC - prevents from consuming of all the
3370			// memory reserves. Please note we have a fallback path.
3371			//
3372			// __GFP_NOWARN - it is supposed that an allocation can
3373			// be failed under low memory or high memory pressure
3374			// scenarios.
3375			bnode = (struct kvfree_rcu_bulk_data *)
3376				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3377			raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
3378		}
3379
3380		if (!bnode)
3381			return false;
3382
3383		// Initialize the new block and attach it.
3384		bnode->nr_records = 0;
3385		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
3386	}
3387
3388	// Finally insert and update the GP for this page.
3389	bnode->records[bnode->nr_records++] = ptr;
3390	get_state_synchronize_rcu_full(&bnode->gp_snap);
3391	atomic_inc(&(*krcp)->bulk_count[idx]);
3392
3393	return true;
3394}
 
3395
3396/*
3397 * Queue a request for lazy invocation of the appropriate free routine
3398 * after a grace period.  Please note that three paths are maintained,
3399 * two for the common case using arrays of pointers and a third one that
3400 * is used only when the main paths cannot be used, for example, due to
3401 * memory pressure.
3402 *
3403 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3404 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3405 * be free'd in workqueue context. This allows us to: batch requests together to
3406 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3407 */
3408void kvfree_call_rcu(struct rcu_head *head, void *ptr)
3409{
3410	unsigned long flags;
3411	struct kfree_rcu_cpu *krcp;
3412	bool success;
3413
3414	/*
3415	 * Please note there is a limitation for the head-less
3416	 * variant, that is why there is a clear rule for such
3417	 * objects: it can be used from might_sleep() context
3418	 * only. For other places please embed an rcu_head to
3419	 * your data.
3420	 */
3421	if (!head)
3422		might_sleep();
3423
3424	// Queue the object but don't yet schedule the batch.
3425	if (debug_rcu_head_queue(ptr)) {
3426		// Probable double kfree_rcu(), just leak.
3427		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3428			  __func__, head);
3429
3430		// Mark as success and leave.
3431		return;
3432	}
3433
3434	kasan_record_aux_stack_noalloc(ptr);
3435	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3436	if (!success) {
3437		run_page_cache_worker(krcp);
3438
3439		if (head == NULL)
3440			// Inline if kvfree_rcu(one_arg) call.
3441			goto unlock_return;
3442
3443		head->func = ptr;
3444		head->next = krcp->head;
3445		WRITE_ONCE(krcp->head, head);
3446		atomic_inc(&krcp->head_count);
3447
3448		// Take a snapshot for this krcp.
3449		krcp->head_gp_snap = get_state_synchronize_rcu();
3450		success = true;
3451	}
3452
3453	/*
3454	 * The kvfree_rcu() caller considers the pointer freed at this point
3455	 * and likely removes any references to it. Since the actual slab
3456	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3457	 * this object (no scanning or false positives reporting).
3458	 */
3459	kmemleak_ignore(ptr);
3460
3461	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3462	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3463		schedule_delayed_monitor_work(krcp);
3464
3465unlock_return:
3466	krc_this_cpu_unlock(krcp, flags);
3467
3468	/*
3469	 * Inline kvfree() after synchronize_rcu(). We can do
3470	 * it from might_sleep() context only, so the current
3471	 * CPU can pass the QS state.
3472	 */
3473	if (!success) {
3474		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3475		synchronize_rcu();
3476		kvfree(ptr);
3477	}
3478}
3479EXPORT_SYMBOL_GPL(kvfree_call_rcu);
3480
3481static unsigned long
3482kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3483{
3484	int cpu;
3485	unsigned long count = 0;
3486
3487	/* Snapshot count of all CPUs */
3488	for_each_possible_cpu(cpu) {
3489		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3490
3491		count += krc_count(krcp);
3492		count += READ_ONCE(krcp->nr_bkv_objs);
3493		atomic_set(&krcp->backoff_page_cache_fill, 1);
3494	}
3495
3496	return count == 0 ? SHRINK_EMPTY : count;
3497}
3498
3499static unsigned long
3500kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3501{
3502	int cpu, freed = 0;
3503
3504	for_each_possible_cpu(cpu) {
3505		int count;
3506		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3507
3508		count = krc_count(krcp);
3509		count += drain_page_cache(krcp);
3510		kfree_rcu_monitor(&krcp->monitor_work.work);
3511
3512		sc->nr_to_scan -= count;
3513		freed += count;
3514
3515		if (sc->nr_to_scan <= 0)
3516			break;
3517	}
3518
3519	return freed == 0 ? SHRINK_STOP : freed;
3520}
3521
3522void __init kfree_rcu_scheduler_running(void)
3523{
3524	int cpu;
3525
3526	for_each_possible_cpu(cpu) {
3527		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3528
3529		if (need_offload_krc(krcp))
3530			schedule_delayed_monitor_work(krcp);
3531	}
3532}
 
3533
3534/*
3535 * During early boot, any blocking grace-period wait automatically
3536 * implies a grace period.
3537 *
3538 * Later on, this could in theory be the case for kernels built with
3539 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3540 * is not a common case.  Furthermore, this optimization would cause
3541 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3542 * grace-period optimization is ignored once the scheduler is running.
3543 */
3544static int rcu_blocking_is_gp(void)
3545{
3546	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3547		might_sleep();
3548		return false;
3549	}
3550	return true;
 
 
3551}
3552
3553/**
3554 * synchronize_rcu - wait until a grace period has elapsed.
3555 *
3556 * Control will return to the caller some time after a full grace
3557 * period has elapsed, in other words after all currently executing RCU
3558 * read-side critical sections have completed.  Note, however, that
3559 * upon return from synchronize_rcu(), the caller might well be executing
3560 * concurrently with new RCU read-side critical sections that began while
3561 * synchronize_rcu() was waiting.
3562 *
3563 * RCU read-side critical sections are delimited by rcu_read_lock()
3564 * and rcu_read_unlock(), and may be nested.  In addition, but only in
3565 * v5.0 and later, regions of code across which interrupts, preemption,
3566 * or softirqs have been disabled also serve as RCU read-side critical
3567 * sections.  This includes hardware interrupt handlers, softirq handlers,
3568 * and NMI handlers.
3569 *
3570 * Note that this guarantee implies further memory-ordering guarantees.
3571 * On systems with more than one CPU, when synchronize_rcu() returns,
3572 * each CPU is guaranteed to have executed a full memory barrier since
3573 * the end of its last RCU read-side critical section whose beginning
3574 * preceded the call to synchronize_rcu().  In addition, each CPU having
3575 * an RCU read-side critical section that extends beyond the return from
3576 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3577 * after the beginning of synchronize_rcu() and before the beginning of
3578 * that RCU read-side critical section.  Note that these guarantees include
3579 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3580 * that are executing in the kernel.
3581 *
3582 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3583 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3584 * to have executed a full memory barrier during the execution of
3585 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3586 * again only if the system has more than one CPU).
3587 *
3588 * Implementation of these memory-ordering guarantees is described here:
3589 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3590 */
3591void synchronize_rcu(void)
3592{
3593	unsigned long flags;
3594	struct rcu_node *rnp;
3595
3596	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3597			 lock_is_held(&rcu_lock_map) ||
3598			 lock_is_held(&rcu_sched_lock_map),
3599			 "Illegal synchronize_rcu() in RCU read-side critical section");
3600	if (!rcu_blocking_is_gp()) {
3601		if (rcu_gp_is_expedited())
3602			synchronize_rcu_expedited();
3603		else
3604			wait_rcu_gp(call_rcu_hurry);
3605		return;
3606	}
3607
3608	// Context allows vacuous grace periods.
3609	// Note well that this code runs with !PREEMPT && !SMP.
3610	// In addition, all code that advances grace periods runs at
3611	// process level.  Therefore, this normal GP overlaps with other
3612	// normal GPs only by being fully nested within them, which allows
3613	// reuse of ->gp_seq_polled_snap.
3614	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3615	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3616
3617	// Update the normal grace-period counters to record
3618	// this grace period, but only those used by the boot CPU.
3619	// The rcu_scheduler_starting() will take care of the rest of
3620	// these counters.
3621	local_irq_save(flags);
3622	WARN_ON_ONCE(num_online_cpus() > 1);
3623	rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3624	for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3625		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3626	local_irq_restore(flags);
3627}
3628EXPORT_SYMBOL_GPL(synchronize_rcu);
3629
3630/**
3631 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3632 * @rgosp: Place to put state cookie
3633 *
3634 * Stores into @rgosp a value that will always be treated by functions
3635 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3636 * has already completed.
 
 
 
 
 
3637 */
3638void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3639{
3640	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3641	rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
 
 
 
 
 
 
 
 
3642}
3643EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3644
3645/**
3646 * get_state_synchronize_rcu - Snapshot current RCU state
3647 *
3648 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3649 * or poll_state_synchronize_rcu() to determine whether or not a full
3650 * grace period has elapsed in the meantime.
3651 */
3652unsigned long get_state_synchronize_rcu(void)
3653{
3654	/*
3655	 * Any prior manipulation of RCU-protected data must happen
3656	 * before the load from ->gp_seq.
3657	 */
3658	smp_mb();  /* ^^^ */
3659	return rcu_seq_snap(&rcu_state.gp_seq_polled);
 
 
 
 
 
 
3660}
3661EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3662
3663/**
3664 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3665 * @rgosp: location to place combined normal/expedited grace-period state
3666 *
3667 * Places the normal and expedited grace-period states in @rgosp.  This
3668 * state value can be passed to a later call to cond_synchronize_rcu_full()
3669 * or poll_state_synchronize_rcu_full() to determine whether or not a
3670 * grace period (whether normal or expedited) has elapsed in the meantime.
3671 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3672 * long, but is guaranteed to see all grace periods.  In contrast, the
3673 * combined state occupies less memory, but can sometimes fail to take
3674 * grace periods into account.
3675 *
3676 * This does not guarantee that the needed grace period will actually
3677 * start.
 
 
 
 
 
 
3678 */
3679void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3680{
3681	struct rcu_node *rnp = rcu_get_root();
3682
3683	/*
3684	 * Any prior manipulation of RCU-protected data must happen
3685	 * before the loads from ->gp_seq and ->expedited_sequence.
3686	 */
3687	smp_mb();  /* ^^^ */
3688	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3689	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3690}
3691EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3692
3693/*
3694 * Helper function for start_poll_synchronize_rcu() and
3695 * start_poll_synchronize_rcu_full().
3696 */
3697static void start_poll_synchronize_rcu_common(void)
3698{
3699	unsigned long flags;
3700	bool needwake;
3701	struct rcu_data *rdp;
3702	struct rcu_node *rnp;
3703
3704	lockdep_assert_irqs_enabled();
3705	local_irq_save(flags);
3706	rdp = this_cpu_ptr(&rcu_data);
3707	rnp = rdp->mynode;
3708	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3709	// Note it is possible for a grace period to have elapsed between
3710	// the above call to get_state_synchronize_rcu() and the below call
3711	// to rcu_seq_snap.  This is OK, the worst that happens is that we
3712	// get a grace period that no one needed.  These accesses are ordered
3713	// by smp_mb(), and we are accessing them in the opposite order
3714	// from which they are updated at grace-period start, as required.
3715	needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3716	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3717	if (needwake)
3718		rcu_gp_kthread_wake();
3719}
 
3720
3721/**
3722 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3723 *
3724 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3725 * or poll_state_synchronize_rcu() to determine whether or not a full
3726 * grace period has elapsed in the meantime.  If the needed grace period
3727 * is not already slated to start, notifies RCU core of the need for that
3728 * grace period.
3729 *
3730 * Interrupts must be enabled for the case where it is necessary to awaken
3731 * the grace-period kthread.
3732 */
3733unsigned long start_poll_synchronize_rcu(void)
3734{
3735	unsigned long gp_seq = get_state_synchronize_rcu();
3736
3737	start_poll_synchronize_rcu_common();
3738	return gp_seq;
 
 
 
 
 
 
 
 
 
3739}
3740EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3741
3742/**
3743 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3744 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3745 *
3746 * Places the normal and expedited grace-period states in *@rgos.  This
3747 * state value can be passed to a later call to cond_synchronize_rcu_full()
3748 * or poll_state_synchronize_rcu_full() to determine whether or not a
3749 * grace period (whether normal or expedited) has elapsed in the meantime.
3750 * If the needed grace period is not already slated to start, notifies
3751 * RCU core of the need for that grace period.
3752 *
3753 * Interrupts must be enabled for the case where it is necessary to awaken
3754 * the grace-period kthread.
3755 */
3756void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3757{
3758	get_state_synchronize_rcu_full(rgosp);
3759
3760	start_poll_synchronize_rcu_common();
3761}
3762EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3763
3764/**
3765 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3766 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3767 *
3768 * If a full RCU grace period has elapsed since the earlier call from
3769 * which @oldstate was obtained, return @true, otherwise return @false.
3770 * If @false is returned, it is the caller's responsibility to invoke this
3771 * function later on until it does return @true.  Alternatively, the caller
3772 * can explicitly wait for a grace period, for example, by passing @oldstate
3773 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3774 * on the one hand or by directly invoking either synchronize_rcu() or
3775 * synchronize_rcu_expedited() on the other.
3776 *
3777 * Yes, this function does not take counter wrap into account.
3778 * But counter wrap is harmless.  If the counter wraps, we have waited for
3779 * more than a billion grace periods (and way more on a 64-bit system!).
3780 * Those needing to keep old state values for very long time periods
3781 * (many hours even on 32-bit systems) should check them occasionally and
3782 * either refresh them or set a flag indicating that the grace period has
3783 * completed.  Alternatively, they can use get_completed_synchronize_rcu()
3784 * to get a guaranteed-completed grace-period state.
3785 *
3786 * In addition, because oldstate compresses the grace-period state for
3787 * both normal and expedited grace periods into a single unsigned long,
3788 * it can miss a grace period when synchronize_rcu() runs concurrently
3789 * with synchronize_rcu_expedited().  If this is unacceptable, please
3790 * instead use the _full() variant of these polling APIs.
3791 *
3792 * This function provides the same memory-ordering guarantees that
3793 * would be provided by a synchronize_rcu() that was invoked at the call
3794 * to the function that provided @oldstate, and that returned at the end
3795 * of this function.
3796 */
3797bool poll_state_synchronize_rcu(unsigned long oldstate)
3798{
3799	if (oldstate == RCU_GET_STATE_COMPLETED ||
3800	    rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3801		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3802		return true;
3803	}
3804	return false;
3805}
3806EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3807
3808/**
3809 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3810 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3811 *
3812 * If a full RCU grace period has elapsed since the earlier call from
3813 * which *rgosp was obtained, return @true, otherwise return @false.
3814 * If @false is returned, it is the caller's responsibility to invoke this
3815 * function later on until it does return @true.  Alternatively, the caller
3816 * can explicitly wait for a grace period, for example, by passing @rgosp
3817 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3818 *
3819 * Yes, this function does not take counter wrap into account.
3820 * But counter wrap is harmless.  If the counter wraps, we have waited
3821 * for more than a billion grace periods (and way more on a 64-bit
3822 * system!).  Those needing to keep rcu_gp_oldstate values for very
3823 * long time periods (many hours even on 32-bit systems) should check
3824 * them occasionally and either refresh them or set a flag indicating
3825 * that the grace period has completed.  Alternatively, they can use
3826 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3827 * grace-period state.
3828 *
3829 * This function provides the same memory-ordering guarantees that would
3830 * be provided by a synchronize_rcu() that was invoked at the call to
3831 * the function that provided @rgosp, and that returned at the end of this
3832 * function.  And this guarantee requires that the root rcu_node structure's
3833 * ->gp_seq field be checked instead of that of the rcu_state structure.
3834 * The problem is that the just-ending grace-period's callbacks can be
3835 * invoked between the time that the root rcu_node structure's ->gp_seq
3836 * field is updated and the time that the rcu_state structure's ->gp_seq
3837 * field is updated.  Therefore, if a single synchronize_rcu() is to
3838 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3839 * then the root rcu_node structure is the one that needs to be polled.
3840 */
3841bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3842{
3843	struct rcu_node *rnp = rcu_get_root();
3844
3845	smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3846	if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3847	    rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3848	    rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3849	    rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3850		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3851		return true;
3852	}
3853	return false;
3854}
3855EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3856
3857/**
3858 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3859 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3860 *
3861 * If a full RCU grace period has elapsed since the earlier call to
3862 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3863 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3864 *
3865 * Yes, this function does not take counter wrap into account.
3866 * But counter wrap is harmless.  If the counter wraps, we have waited for
3867 * more than 2 billion grace periods (and way more on a 64-bit system!),
3868 * so waiting for a couple of additional grace periods should be just fine.
3869 *
3870 * This function provides the same memory-ordering guarantees that
3871 * would be provided by a synchronize_rcu() that was invoked at the call
3872 * to the function that provided @oldstate and that returned at the end
3873 * of this function.
3874 */
3875void cond_synchronize_rcu(unsigned long oldstate)
3876{
3877	if (!poll_state_synchronize_rcu(oldstate))
3878		synchronize_rcu();
3879}
3880EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
 
 
 
 
 
3881
3882/**
3883 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3884 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3885 *
3886 * If a full RCU grace period has elapsed since the call to
3887 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3888 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3889 * obtained, just return.  Otherwise, invoke synchronize_rcu() to wait
3890 * for a full grace period.
3891 *
3892 * Yes, this function does not take counter wrap into account.
3893 * But counter wrap is harmless.  If the counter wraps, we have waited for
3894 * more than 2 billion grace periods (and way more on a 64-bit system!),
3895 * so waiting for a couple of additional grace periods should be just fine.
3896 *
3897 * This function provides the same memory-ordering guarantees that
3898 * would be provided by a synchronize_rcu() that was invoked at the call
3899 * to the function that provided @rgosp and that returned at the end of
3900 * this function.
3901 */
3902void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3903{
3904	if (!poll_state_synchronize_rcu_full(rgosp))
3905		synchronize_rcu();
3906}
3907EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3908
3909/*
3910 * Check to see if there is any immediate RCU-related work to be done by
3911 * the current CPU, returning 1 if so and zero otherwise.  The checks are
3912 * in order of increasing expense: checks that can be carried out against
3913 * CPU-local state are performed first.  However, we must check for CPU
3914 * stalls first, else we might not get a chance.
3915 */
3916static int rcu_pending(int user)
3917{
3918	bool gp_in_progress;
3919	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3920	struct rcu_node *rnp = rdp->mynode;
3921
3922	lockdep_assert_irqs_disabled();
3923
3924	/* Check for CPU stalls, if enabled. */
3925	check_cpu_stall(rdp);
3926
3927	/* Does this CPU need a deferred NOCB wakeup? */
3928	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3929		return 1;
3930
3931	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3932	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3933		return 0;
3934
3935	/* Is the RCU core waiting for a quiescent state from this CPU? */
3936	gp_in_progress = rcu_gp_in_progress();
3937	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
 
 
 
3938		return 1;
 
3939
3940	/* Does this CPU have callbacks ready to invoke? */
3941	if (!rcu_rdp_is_offloaded(rdp) &&
3942	    rcu_segcblist_ready_cbs(&rdp->cblist))
3943		return 1;
 
3944
3945	/* Has RCU gone idle with this CPU needing another grace period? */
3946	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3947	    !rcu_rdp_is_offloaded(rdp) &&
3948	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
 
 
 
 
 
 
 
 
 
 
 
3949		return 1;
 
3950
3951	/* Have RCU grace period completed or started?  */
3952	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3953	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3954		return 1;
 
3955
3956	/* nothing to do */
 
3957	return 0;
3958}
3959
3960/*
3961 * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3962 * the compiler is expected to optimize this away.
 
3963 */
3964static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3965{
3966	trace_rcu_barrier(rcu_state.name, s, cpu,
3967			  atomic_read(&rcu_state.barrier_cpu_count), done);
 
 
 
 
3968}
3969
3970/*
3971 * RCU callback function for rcu_barrier().  If we are last, wake
3972 * up the task executing rcu_barrier().
3973 *
3974 * Note that the value of rcu_state.barrier_sequence must be captured
3975 * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3976 * other CPUs might count the value down to zero before this CPU gets
3977 * around to invoking rcu_barrier_trace(), which might result in bogus
3978 * data from the next instance of rcu_barrier().
3979 */
3980static void rcu_barrier_callback(struct rcu_head *rhp)
3981{
3982	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
 
 
 
3983
3984	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3985		rcu_barrier_trace(TPS("LastCB"), -1, s);
3986		complete(&rcu_state.barrier_completion);
3987	} else {
3988		rcu_barrier_trace(TPS("CB"), -1, s);
 
 
 
 
3989	}
 
 
 
3990}
3991
3992/*
3993 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
 
3994 */
3995static void rcu_barrier_entrain(struct rcu_data *rdp)
 
3996{
3997	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3998	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3999	bool wake_nocb = false;
4000	bool was_alldone = false;
4001
4002	lockdep_assert_held(&rcu_state.barrier_lock);
4003	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
4004		return;
4005	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
4006	rdp->barrier_head.func = rcu_barrier_callback;
4007	debug_rcu_head_queue(&rdp->barrier_head);
4008	rcu_nocb_lock(rdp);
4009	/*
4010	 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
4011	 * queue. This way we don't wait for bypass timer that can reach seconds
4012	 * if it's fully lazy.
4013	 */
4014	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
4015	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
4016	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
4017	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
4018		atomic_inc(&rcu_state.barrier_cpu_count);
4019	} else {
4020		debug_rcu_head_unqueue(&rdp->barrier_head);
4021		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
4022	}
4023	rcu_nocb_unlock(rdp);
4024	if (wake_nocb)
4025		wake_nocb_gp(rdp, false);
4026	smp_store_release(&rdp->barrier_seq_snap, gseq);
4027}
4028
4029/*
4030 * Called with preemption disabled, and from cross-cpu IRQ context.
4031 */
4032static void rcu_barrier_handler(void *cpu_in)
4033{
4034	uintptr_t cpu = (uintptr_t)cpu_in;
4035	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4036
4037	lockdep_assert_irqs_disabled();
4038	WARN_ON_ONCE(cpu != rdp->cpu);
4039	WARN_ON_ONCE(cpu != smp_processor_id());
4040	raw_spin_lock(&rcu_state.barrier_lock);
4041	rcu_barrier_entrain(rdp);
4042	raw_spin_unlock(&rcu_state.barrier_lock);
4043}
4044
4045/**
4046 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4047 *
4048 * Note that this primitive does not necessarily wait for an RCU grace period
4049 * to complete.  For example, if there are no RCU callbacks queued anywhere
4050 * in the system, then rcu_barrier() is within its rights to return
4051 * immediately, without waiting for anything, much less an RCU grace period.
4052 */
4053void rcu_barrier(void)
4054{
4055	uintptr_t cpu;
4056	unsigned long flags;
4057	unsigned long gseq;
4058	struct rcu_data *rdp;
4059	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
 
4060
4061	rcu_barrier_trace(TPS("Begin"), -1, s);
4062
4063	/* Take mutex to serialize concurrent rcu_barrier() requests. */
4064	mutex_lock(&rcu_state.barrier_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4065
4066	/* Did someone else do our work for us? */
4067	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4068		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
 
 
 
 
 
 
 
 
 
4069		smp_mb(); /* caller's subsequent code after above check. */
4070		mutex_unlock(&rcu_state.barrier_mutex);
4071		return;
4072	}
4073
4074	/* Mark the start of the barrier operation. */
4075	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4076	rcu_seq_start(&rcu_state.barrier_sequence);
4077	gseq = rcu_state.barrier_sequence;
4078	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4079
4080	/*
4081	 * Initialize the count to two rather than to zero in order
4082	 * to avoid a too-soon return to zero in case of an immediate
4083	 * invocation of the just-enqueued callback (or preemption of
4084	 * this task).  Exclude CPU-hotplug operations to ensure that no
4085	 * offline non-offloaded CPU has callbacks queued.
4086	 */
4087	init_completion(&rcu_state.barrier_completion);
4088	atomic_set(&rcu_state.barrier_cpu_count, 2);
4089	raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
 
 
 
4090
4091	/*
4092	 * Force each CPU with callbacks to register a new callback.
4093	 * When that callback is invoked, we will know that all of the
4094	 * corresponding CPU's preceding callbacks have been invoked.
4095	 */
4096	for_each_possible_cpu(cpu) {
4097		rdp = per_cpu_ptr(&rcu_data, cpu);
4098retry:
4099		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4100			continue;
4101		raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4102		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4103			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4104			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4105			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4106			continue;
4107		}
4108		if (!rcu_rdp_cpu_online(rdp)) {
4109			rcu_barrier_entrain(rdp);
4110			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4111			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4112			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4113			continue;
4114		}
4115		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4116		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4117			schedule_timeout_uninterruptible(1);
4118			goto retry;
4119		}
4120		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4121		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4122	}
 
4123
4124	/*
4125	 * Now that we have an rcu_barrier_callback() callback on each
4126	 * CPU, and thus each counted, remove the initial count.
4127	 */
4128	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4129		complete(&rcu_state.barrier_completion);
4130
4131	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4132	wait_for_completion(&rcu_state.barrier_completion);
4133
4134	/* Mark the end of the barrier operation. */
4135	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4136	rcu_seq_end(&rcu_state.barrier_sequence);
4137	gseq = rcu_state.barrier_sequence;
4138	for_each_possible_cpu(cpu) {
4139		rdp = per_cpu_ptr(&rcu_data, cpu);
4140
4141		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4142	}
4143
4144	/* Other rcu_barrier() invocations can now safely proceed. */
4145	mutex_unlock(&rcu_state.barrier_mutex);
4146}
4147EXPORT_SYMBOL_GPL(rcu_barrier);
4148
4149static unsigned long rcu_barrier_last_throttle;
4150
4151/**
4152 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
4153 *
4154 * This can be thought of as guard rails around rcu_barrier() that
4155 * permits unrestricted userspace use, at least assuming the hardware's
4156 * try_cmpxchg() is robust.  There will be at most one call per second to
4157 * rcu_barrier() system-wide from use of this function, which means that
4158 * callers might needlessly wait a second or three.
4159 *
4160 * This is intended for use by test suites to avoid OOM by flushing RCU
4161 * callbacks from the previous test before starting the next.  See the
4162 * rcutree.do_rcu_barrier module parameter for more information.
4163 *
4164 * Why not simply make rcu_barrier() more scalable?  That might be
4165 * the eventual endpoint, but let's keep it simple for the time being.
4166 * Note that the module parameter infrastructure serializes calls to a
4167 * given .set() function, but should concurrent .set() invocation ever be
4168 * possible, we are ready!
4169 */
4170static void rcu_barrier_throttled(void)
4171{
4172	unsigned long j = jiffies;
4173	unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
4174	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4175
4176	while (time_in_range(j, old, old + HZ / 16) ||
4177	       !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
4178		schedule_timeout_idle(HZ / 16);
4179		if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4180			smp_mb(); /* caller's subsequent code after above check. */
4181			return;
4182		}
4183		j = jiffies;
4184		old = READ_ONCE(rcu_barrier_last_throttle);
4185	}
4186	rcu_barrier();
4187}
4188
4189/*
4190 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
4191 * request arrives.  We insist on a true value to allow for possible
4192 * future expansion.
4193 */
4194static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
4195{
4196	bool b;
4197	int ret;
4198
4199	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
4200		return -EAGAIN;
4201	ret = kstrtobool(val, &b);
4202	if (!ret && b) {
4203		atomic_inc((atomic_t *)kp->arg);
4204		rcu_barrier_throttled();
4205		atomic_dec((atomic_t *)kp->arg);
4206	}
4207	return ret;
4208}
4209
4210/*
4211 * Output the number of outstanding rcutree.do_rcu_barrier requests.
4212 */
4213static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
4214{
4215	return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
4216}
4217
4218static const struct kernel_param_ops do_rcu_barrier_ops = {
4219	.set = param_set_do_rcu_barrier,
4220	.get = param_get_do_rcu_barrier,
4221};
4222static atomic_t do_rcu_barrier;
4223module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
4224
4225/*
4226 * Compute the mask of online CPUs for the specified rcu_node structure.
4227 * This will not be stable unless the rcu_node structure's ->lock is
4228 * held, but the bit corresponding to the current CPU will be stable
4229 * in most contexts.
4230 */
4231static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4232{
4233	return READ_ONCE(rnp->qsmaskinitnext);
4234}
4235
4236/*
4237 * Is the CPU corresponding to the specified rcu_data structure online
4238 * from RCU's perspective?  This perspective is given by that structure's
4239 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
4240 */
4241static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4242{
4243	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
4244}
4245
4246bool rcu_cpu_online(int cpu)
4247{
4248	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4249
4250	return rcu_rdp_cpu_online(rdp);
4251}
4252
4253#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
4254
4255/*
4256 * Is the current CPU online as far as RCU is concerned?
4257 *
4258 * Disable preemption to avoid false positives that could otherwise
4259 * happen due to the current CPU number being sampled, this task being
4260 * preempted, its old CPU being taken offline, resuming on some other CPU,
4261 * then determining that its old CPU is now offline.
4262 *
4263 * Disable checking if in an NMI handler because we cannot safely
4264 * report errors from NMI handlers anyway.  In addition, it is OK to use
4265 * RCU on an offline processor during initial boot, hence the check for
4266 * rcu_scheduler_fully_active.
4267 */
4268bool rcu_lockdep_current_cpu_online(void)
4269{
4270	struct rcu_data *rdp;
4271	bool ret = false;
4272
4273	if (in_nmi() || !rcu_scheduler_fully_active)
4274		return true;
4275	preempt_disable_notrace();
4276	rdp = this_cpu_ptr(&rcu_data);
4277	/*
4278	 * Strictly, we care here about the case where the current CPU is
4279	 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
4280	 * not being up to date. So arch_spin_is_locked() might have a
4281	 * false positive if it's held by some *other* CPU, but that's
4282	 * OK because that just means a false *negative* on the warning.
4283	 */
4284	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4285		ret = true;
4286	preempt_enable_notrace();
4287	return ret;
4288}
4289EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4290
4291#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4292
4293// Has rcu_init() been invoked?  This is used (for example) to determine
4294// whether spinlocks may be acquired safely.
4295static bool rcu_init_invoked(void)
4296{
4297	return !!rcu_state.n_online_cpus;
4298}
4299
4300/*
4301 * All CPUs for the specified rcu_node structure have gone offline,
4302 * and all tasks that were preempted within an RCU read-side critical
4303 * section while running on one of those CPUs have since exited their RCU
4304 * read-side critical section.  Some other CPU is reporting this fact with
4305 * the specified rcu_node structure's ->lock held and interrupts disabled.
4306 * This function therefore goes up the tree of rcu_node structures,
4307 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
4308 * the leaf rcu_node structure's ->qsmaskinit field has already been
4309 * updated.
4310 *
4311 * This function does check that the specified rcu_node structure has
4312 * all CPUs offline and no blocked tasks, so it is OK to invoke it
4313 * prematurely.  That said, invoking it after the fact will cost you
4314 * a needless lock acquisition.  So once it has done its work, don't
4315 * invoke it again.
4316 */
4317static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4318{
4319	long mask;
4320	struct rcu_node *rnp = rnp_leaf;
4321
4322	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4323	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4324	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4325	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4326		return;
4327	for (;;) {
4328		mask = rnp->grpmask;
4329		rnp = rnp->parent;
4330		if (!rnp)
4331			break;
4332		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4333		rnp->qsmaskinit &= ~mask;
4334		/* Between grace periods, so better already be zero! */
4335		WARN_ON_ONCE(rnp->qsmask);
4336		if (rnp->qsmaskinit) {
4337			raw_spin_unlock_rcu_node(rnp);
4338			/* irqs remain disabled. */
4339			return;
4340		}
4341		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4342	}
4343}
4344
4345/*
4346 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4347 * first CPU in a given leaf rcu_node structure coming online.  The caller
4348 * must hold the corresponding leaf rcu_node ->lock with interrupts
4349 * disabled.
4350 */
4351static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4352{
4353	long mask;
4354	long oldmask;
4355	struct rcu_node *rnp = rnp_leaf;
4356
4357	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4358	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4359	for (;;) {
4360		mask = rnp->grpmask;
4361		rnp = rnp->parent;
4362		if (rnp == NULL)
4363			return;
4364		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4365		oldmask = rnp->qsmaskinit;
4366		rnp->qsmaskinit |= mask;
4367		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4368		if (oldmask)
4369			return;
4370	}
4371}
 
4372
4373/*
4374 * Do boot-time initialization of a CPU's per-CPU RCU data.
4375 */
4376static void __init
4377rcu_boot_init_percpu_data(int cpu)
4378{
4379	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4380	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
4381
4382	/* Set up local state, ensuring consistent view of global state. */
4383	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4384	INIT_WORK(&rdp->strict_work, strict_work_handler);
4385	WARN_ON_ONCE(ct->dynticks_nesting != 1);
4386	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4387	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4388	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4389	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4390	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4391	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4392	rdp->last_sched_clock = jiffies;
4393	rdp->cpu = cpu;
 
4394	rcu_boot_init_nocb_percpu_data(rdp);
 
4395}
4396
4397/*
4398 * Invoked early in the CPU-online process, when pretty much all services
4399 * are available.  The incoming CPU is not present.
4400 *
4401 * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4402 * offline event can be happening at a given time.  Note also that we can
4403 * accept some slop in the rsp->gp_seq access due to the fact that this
4404 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4405 * And any offloaded callbacks are being numbered elsewhere.
4406 */
4407int rcutree_prepare_cpu(unsigned int cpu)
 
4408{
4409	unsigned long flags;
4410	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4411	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4412	struct rcu_node *rnp = rcu_get_root();
 
 
 
4413
4414	/* Set up local state, ensuring consistent view of global state. */
4415	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
 
4416	rdp->qlen_last_fqs_check = 0;
4417	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4418	rdp->blimit = blimit;
4419	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4420	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4421
4422	/*
4423	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4424	 * (re-)initialized.
4425	 */
4426	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4427		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4428
4429	/*
4430	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4431	 * propagation up the rcu_node tree will happen at the beginning
4432	 * of the next grace period.
4433	 */
4434	rnp = rdp->mynode;
4435	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4436	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4437	rdp->gp_seq_needed = rdp->gp_seq;
4438	rdp->cpu_no_qs.b.norm = true;
4439	rdp->core_needs_qs = false;
4440	rdp->rcu_iw_pending = false;
4441	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4442	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4443	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4444	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4445	rcu_spawn_one_boost_kthread(rnp);
4446	rcu_spawn_cpu_nocb_kthread(cpu);
4447	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4448
4449	return 0;
4450}
4451
4452/*
4453 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4454 */
4455static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4456{
4457	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4458
4459	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4460}
4461
4462/*
4463 * Has the specified (known valid) CPU ever been fully online?
4464 */
4465bool rcu_cpu_beenfullyonline(int cpu)
4466{
4467	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4468
4469	return smp_load_acquire(&rdp->beenonline);
4470}
4471
4472/*
4473 * Near the end of the CPU-online process.  Pretty much all services
4474 * enabled, and the CPU is now very much alive.
4475 */
4476int rcutree_online_cpu(unsigned int cpu)
4477{
4478	unsigned long flags;
4479	struct rcu_data *rdp;
4480	struct rcu_node *rnp;
4481
4482	rdp = per_cpu_ptr(&rcu_data, cpu);
4483	rnp = rdp->mynode;
4484	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4485	rnp->ffmask |= rdp->grpmask;
4486	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4487	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4488		return 0; /* Too early in boot for scheduler work. */
4489	sync_sched_exp_online_cleanup(cpu);
4490	rcutree_affinity_setting(cpu, -1);
4491
4492	// Stop-machine done, so allow nohz_full to disable tick.
4493	tick_dep_clear(TICK_DEP_BIT_RCU);
4494	return 0;
4495}
4496
4497/*
4498 * Mark the specified CPU as being online so that subsequent grace periods
4499 * (both expedited and normal) will wait on it.  Note that this means that
4500 * incoming CPUs are not allowed to use RCU read-side critical sections
4501 * until this function is called.  Failing to observe this restriction
4502 * will result in lockdep splats.
4503 *
4504 * Note that this function is special in that it is invoked directly
4505 * from the incoming CPU rather than from the cpuhp_step mechanism.
4506 * This is because this function must be invoked at a precise location.
4507 * This incoming CPU must not have enabled interrupts yet.
4508 *
4509 * This mirrors the effects of rcutree_report_cpu_dead().
4510 */
4511void rcutree_report_cpu_starting(unsigned int cpu)
4512{
4513	unsigned long mask;
4514	struct rcu_data *rdp;
4515	struct rcu_node *rnp;
4516	bool newcpu;
4517
4518	lockdep_assert_irqs_disabled();
4519	rdp = per_cpu_ptr(&rcu_data, cpu);
4520	if (rdp->cpu_started)
4521		return;
4522	rdp->cpu_started = true;
4523
 
4524	rnp = rdp->mynode;
4525	mask = rdp->grpmask;
4526	arch_spin_lock(&rcu_state.ofl_lock);
4527	rcu_dynticks_eqs_online();
4528	raw_spin_lock(&rcu_state.barrier_lock);
4529	raw_spin_lock_rcu_node(rnp);
4530	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4531	raw_spin_unlock(&rcu_state.barrier_lock);
4532	newcpu = !(rnp->expmaskinitnext & mask);
4533	rnp->expmaskinitnext |= mask;
4534	/* Allow lockless access for expedited grace periods. */
4535	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4536	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4537	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4538	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4539	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4540
4541	/* An incoming CPU should never be blocking a grace period. */
4542	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4543		/* rcu_report_qs_rnp() *really* wants some flags to restore */
4544		unsigned long flags;
4545
4546		local_irq_save(flags);
4547		rcu_disable_urgency_upon_qs(rdp);
4548		/* Report QS -after- changing ->qsmaskinitnext! */
4549		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4550	} else {
4551		raw_spin_unlock_rcu_node(rnp);
4552	}
4553	arch_spin_unlock(&rcu_state.ofl_lock);
4554	smp_store_release(&rdp->beenonline, true);
4555	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4556}
4557
4558/*
4559 * The outgoing function has no further need of RCU, so remove it from
4560 * the rcu_node tree's ->qsmaskinitnext bit masks.
4561 *
4562 * Note that this function is special in that it is invoked directly
4563 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4564 * This is because this function must be invoked at a precise location.
4565 *
4566 * This mirrors the effect of rcutree_report_cpu_starting().
4567 */
4568void rcutree_report_cpu_dead(void)
4569{
4570	unsigned long flags;
4571	unsigned long mask;
4572	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4573	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4574
4575	/*
4576	 * IRQS must be disabled from now on and until the CPU dies, or an interrupt
4577	 * may introduce a new READ-side while it is actually off the QS masks.
4578	 */
4579	lockdep_assert_irqs_disabled();
4580	// Do any dangling deferred wakeups.
4581	do_nocb_deferred_wakeup(rdp);
4582
4583	rcu_preempt_deferred_qs(current);
4584
4585	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4586	mask = rdp->grpmask;
4587	arch_spin_lock(&rcu_state.ofl_lock);
4588	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4589	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4590	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4591	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4592		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4593		rcu_disable_urgency_upon_qs(rdp);
4594		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4595		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4596	}
4597	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4598	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4599	arch_spin_unlock(&rcu_state.ofl_lock);
4600	rdp->cpu_started = false;
4601}
4602
4603#ifdef CONFIG_HOTPLUG_CPU
4604/*
4605 * The outgoing CPU has just passed through the dying-idle state, and we
4606 * are being invoked from the CPU that was IPIed to continue the offline
4607 * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4608 */
4609void rcutree_migrate_callbacks(int cpu)
4610{
4611	unsigned long flags;
4612	struct rcu_data *my_rdp;
4613	struct rcu_node *my_rnp;
4614	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4615	bool needwake;
4616
4617	if (rcu_rdp_is_offloaded(rdp) ||
4618	    rcu_segcblist_empty(&rdp->cblist))
4619		return;  /* No callbacks to migrate. */
4620
4621	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4622	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4623	rcu_barrier_entrain(rdp);
4624	my_rdp = this_cpu_ptr(&rcu_data);
4625	my_rnp = my_rdp->mynode;
4626	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4627	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4628	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4629	/* Leverage recent GPs and set GP for new callbacks. */
4630	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4631		   rcu_advance_cbs(my_rnp, my_rdp);
4632	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4633	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4634	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4635	rcu_segcblist_disable(&rdp->cblist);
4636	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4637	check_cb_ovld_locked(my_rdp, my_rnp);
4638	if (rcu_rdp_is_offloaded(my_rdp)) {
4639		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4640		__call_rcu_nocb_wake(my_rdp, true, flags);
4641	} else {
4642		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4643		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4644	}
4645	if (needwake)
4646		rcu_gp_kthread_wake();
4647	lockdep_assert_irqs_enabled();
4648	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4649		  !rcu_segcblist_empty(&rdp->cblist),
4650		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4651		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4652		  rcu_segcblist_first_cb(&rdp->cblist));
4653}
4654
4655/*
4656 * The CPU has been completely removed, and some other CPU is reporting
4657 * this fact from process context.  Do the remainder of the cleanup.
4658 * There can only be one CPU hotplug operation at a time, so no need for
4659 * explicit locking.
4660 */
4661int rcutree_dead_cpu(unsigned int cpu)
4662{
4663	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4664	// Stop-machine done, so allow nohz_full to disable tick.
4665	tick_dep_clear(TICK_DEP_BIT_RCU);
4666	return 0;
4667}
4668
4669/*
4670 * Near the end of the offline process.  Trace the fact that this CPU
4671 * is going offline.
4672 */
4673int rcutree_dying_cpu(unsigned int cpu)
 
4674{
4675	bool blkd;
4676	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4677	struct rcu_node *rnp = rdp->mynode;
 
4678
4679	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4680	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4681			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4682	return 0;
4683}
4684
4685/*
4686 * Near the beginning of the process.  The CPU is still very much alive
4687 * with pretty much all services enabled.
4688 */
4689int rcutree_offline_cpu(unsigned int cpu)
4690{
4691	unsigned long flags;
4692	struct rcu_data *rdp;
4693	struct rcu_node *rnp;
4694
4695	rdp = per_cpu_ptr(&rcu_data, cpu);
4696	rnp = rdp->mynode;
4697	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4698	rnp->ffmask &= ~rdp->grpmask;
4699	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4700
4701	rcutree_affinity_setting(cpu, cpu);
4702
4703	// nohz_full CPUs need the tick for stop-machine to work quickly
4704	tick_dep_set(TICK_DEP_BIT_RCU);
4705	return 0;
 
 
 
 
4706}
4707#endif /* #ifdef CONFIG_HOTPLUG_CPU */
4708
4709/*
4710 * On non-huge systems, use expedited RCU grace periods to make suspend
4711 * and hibernation run faster.
4712 */
4713static int rcu_pm_notify(struct notifier_block *self,
4714			 unsigned long action, void *hcpu)
4715{
4716	switch (action) {
4717	case PM_HIBERNATION_PREPARE:
4718	case PM_SUSPEND_PREPARE:
4719		rcu_async_hurry();
4720		rcu_expedite_gp();
4721		break;
4722	case PM_POST_HIBERNATION:
4723	case PM_POST_SUSPEND:
4724		rcu_unexpedite_gp();
4725		rcu_async_relax();
4726		break;
4727	default:
4728		break;
4729	}
4730	return NOTIFY_OK;
4731}
4732
4733#ifdef CONFIG_RCU_EXP_KTHREAD
4734struct kthread_worker *rcu_exp_gp_kworker;
4735struct kthread_worker *rcu_exp_par_gp_kworker;
4736
4737static void __init rcu_start_exp_gp_kworkers(void)
4738{
4739	const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4740	const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4741	struct sched_param param = { .sched_priority = kthread_prio };
4742
4743	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4744	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4745		pr_err("Failed to create %s!\n", gp_kworker_name);
4746		return;
4747	}
4748
4749	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4750	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4751		pr_err("Failed to create %s!\n", par_gp_kworker_name);
4752		kthread_destroy_worker(rcu_exp_gp_kworker);
4753		return;
4754	}
4755
4756	sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4757	sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4758				   &param);
4759}
4760
4761static inline void rcu_alloc_par_gp_wq(void)
4762{
4763}
4764#else /* !CONFIG_RCU_EXP_KTHREAD */
4765struct workqueue_struct *rcu_par_gp_wq;
4766
4767static void __init rcu_start_exp_gp_kworkers(void)
4768{
4769}
4770
4771static inline void rcu_alloc_par_gp_wq(void)
4772{
4773	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4774	WARN_ON(!rcu_par_gp_wq);
4775}
4776#endif /* CONFIG_RCU_EXP_KTHREAD */
4777
4778/*
4779 * Spawn the kthreads that handle RCU's grace periods.
4780 */
4781static int __init rcu_spawn_gp_kthread(void)
4782{
4783	unsigned long flags;
4784	struct rcu_node *rnp;
4785	struct sched_param sp;
4786	struct task_struct *t;
4787	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4788
4789	rcu_scheduler_fully_active = 1;
4790	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4791	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4792		return 0;
4793	if (kthread_prio) {
4794		sp.sched_priority = kthread_prio;
4795		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4796	}
4797	rnp = rcu_get_root();
4798	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4799	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4800	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4801	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4802	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4803	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4804	wake_up_process(t);
4805	/* This is a pre-SMP initcall, we expect a single CPU */
4806	WARN_ON(num_online_cpus() > 1);
4807	/*
4808	 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
4809	 * due to rcu_scheduler_fully_active.
4810	 */
4811	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4812	rcu_spawn_one_boost_kthread(rdp->mynode);
4813	rcu_spawn_core_kthreads();
4814	/* Create kthread worker for expedited GPs */
4815	rcu_start_exp_gp_kworkers();
4816	return 0;
4817}
4818early_initcall(rcu_spawn_gp_kthread);
4819
4820/*
4821 * This function is invoked towards the end of the scheduler's
4822 * initialization process.  Before this is called, the idle task might
4823 * contain synchronous grace-period primitives (during which time, this idle
4824 * task is booting the system, and such primitives are no-ops).  After this
4825 * function is called, any synchronous grace-period primitives are run as
4826 * expedited, with the requesting task driving the grace period forward.
4827 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4828 * runtime RCU functionality.
4829 */
4830void rcu_scheduler_starting(void)
4831{
4832	unsigned long flags;
4833	struct rcu_node *rnp;
4834
4835	WARN_ON(num_online_cpus() != 1);
4836	WARN_ON(nr_context_switches() > 0);
4837	rcu_test_sync_prims();
 
4838
4839	// Fix up the ->gp_seq counters.
4840	local_irq_save(flags);
4841	rcu_for_each_node_breadth_first(rnp)
4842		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4843	local_irq_restore(flags);
 
 
 
4844
4845	// Switch out of early boot mode.
4846	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4847	rcu_test_sync_prims();
4848}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4849
4850/*
4851 * Helper function for rcu_init() that initializes the rcu_state structure.
4852 */
4853static void __init rcu_init_one(void)
 
4854{
4855	static const char * const buf[] = RCU_NODE_NAME_INIT;
4856	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4857	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4858	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4859
4860	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
 
 
4861	int cpustride = 1;
4862	int i;
4863	int j;
4864	struct rcu_node *rnp;
4865
4866	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4867
4868	/* Silence gcc 4.8 false positive about array index out of range. */
4869	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4870		panic("rcu_init_one: rcu_num_lvls out of range");
4871
4872	/* Initialize the level-tracking arrays. */
4873
 
 
4874	for (i = 1; i < rcu_num_lvls; i++)
4875		rcu_state.level[i] =
4876			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4877	rcu_init_levelspread(levelspread, num_rcu_lvl);
4878
4879	/* Initialize the elements themselves, starting from the leaves. */
4880
4881	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4882		cpustride *= levelspread[i];
4883		rnp = rcu_state.level[i];
4884		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4885			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4886			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4887						   &rcu_node_class[i], buf[i]);
4888			raw_spin_lock_init(&rnp->fqslock);
4889			lockdep_set_class_and_name(&rnp->fqslock,
4890						   &rcu_fqs_class[i], fqs[i]);
4891			rnp->gp_seq = rcu_state.gp_seq;
4892			rnp->gp_seq_needed = rcu_state.gp_seq;
4893			rnp->completedqs = rcu_state.gp_seq;
4894			rnp->qsmask = 0;
4895			rnp->qsmaskinit = 0;
4896			rnp->grplo = j * cpustride;
4897			rnp->grphi = (j + 1) * cpustride - 1;
4898			if (rnp->grphi >= nr_cpu_ids)
4899				rnp->grphi = nr_cpu_ids - 1;
4900			if (i == 0) {
4901				rnp->grpnum = 0;
4902				rnp->grpmask = 0;
4903				rnp->parent = NULL;
4904			} else {
4905				rnp->grpnum = j % levelspread[i - 1];
4906				rnp->grpmask = BIT(rnp->grpnum);
4907				rnp->parent = rcu_state.level[i - 1] +
4908					      j / levelspread[i - 1];
4909			}
4910			rnp->level = i;
4911			INIT_LIST_HEAD(&rnp->blkd_tasks);
4912			rcu_init_one_nocb(rnp);
4913			init_waitqueue_head(&rnp->exp_wq[0]);
4914			init_waitqueue_head(&rnp->exp_wq[1]);
4915			init_waitqueue_head(&rnp->exp_wq[2]);
4916			init_waitqueue_head(&rnp->exp_wq[3]);
4917			spin_lock_init(&rnp->exp_lock);
4918			mutex_init(&rnp->boost_kthread_mutex);
4919			raw_spin_lock_init(&rnp->exp_poll_lock);
4920			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4921			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4922		}
4923	}
4924
4925	init_swait_queue_head(&rcu_state.gp_wq);
4926	init_swait_queue_head(&rcu_state.expedited_wq);
4927	rnp = rcu_first_leaf_node();
 
4928	for_each_possible_cpu(i) {
4929		while (i > rnp->grphi)
4930			rnp++;
4931		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4932		rcu_boot_init_percpu_data(i);
4933	}
4934}
4935
4936/*
4937 * Force priority from the kernel command-line into range.
4938 */
4939static void __init sanitize_kthread_prio(void)
4940{
4941	int kthread_prio_in = kthread_prio;
4942
4943	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4944	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4945		kthread_prio = 2;
4946	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4947		kthread_prio = 1;
4948	else if (kthread_prio < 0)
4949		kthread_prio = 0;
4950	else if (kthread_prio > 99)
4951		kthread_prio = 99;
4952
4953	if (kthread_prio != kthread_prio_in)
4954		pr_alert("%s: Limited prio to %d from %d\n",
4955			 __func__, kthread_prio, kthread_prio_in);
4956}
4957
4958/*
4959 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4960 * replace the definitions in tree.h because those are needed to size
4961 * the ->node array in the rcu_state structure.
4962 */
4963void rcu_init_geometry(void)
4964{
4965	ulong d;
4966	int i;
4967	static unsigned long old_nr_cpu_ids;
4968	int rcu_capacity[RCU_NUM_LVLS];
4969	static bool initialized;
4970
4971	if (initialized) {
4972		/*
4973		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4974		 * unless nr_cpus_ids == NR_CPUS, in which case who cares?
4975		 */
4976		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4977		return;
4978	}
4979
4980	old_nr_cpu_ids = nr_cpu_ids;
4981	initialized = true;
4982
4983	/*
4984	 * Initialize any unspecified boot parameters.
4985	 * The default values of jiffies_till_first_fqs and
4986	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4987	 * value, which is a function of HZ, then adding one for each
4988	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4989	 */
4990	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4991	if (jiffies_till_first_fqs == ULONG_MAX)
4992		jiffies_till_first_fqs = d;
4993	if (jiffies_till_next_fqs == ULONG_MAX)
4994		jiffies_till_next_fqs = d;
4995	adjust_jiffies_till_sched_qs();
4996
4997	/* If the compile-time values are accurate, just leave. */
4998	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4999	    nr_cpu_ids == NR_CPUS)
5000		return;
5001	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
5002		rcu_fanout_leaf, nr_cpu_ids);
5003
5004	/*
5005	 * The boot-time rcu_fanout_leaf parameter must be at least two
5006	 * and cannot exceed the number of bits in the rcu_node masks.
5007	 * Complain and fall back to the compile-time values if this
5008	 * limit is exceeded.
5009	 */
5010	if (rcu_fanout_leaf < 2 ||
5011	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
5012		rcu_fanout_leaf = RCU_FANOUT_LEAF;
5013		WARN_ON(1);
5014		return;
5015	}
5016
5017	/*
5018	 * Compute number of nodes that can be handled an rcu_node tree
5019	 * with the given number of levels.
 
5020	 */
5021	rcu_capacity[0] = rcu_fanout_leaf;
5022	for (i = 1; i < RCU_NUM_LVLS; i++)
5023		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
5024
5025	/*
5026	 * The tree must be able to accommodate the configured number of CPUs.
5027	 * If this limit is exceeded, fall back to the compile-time values.
5028	 */
5029	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
5030		rcu_fanout_leaf = RCU_FANOUT_LEAF;
 
 
 
 
 
 
5031		WARN_ON(1);
5032		return;
5033	}
5034
5035	/* Calculate the number of levels in the tree. */
5036	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
5037	}
5038	rcu_num_lvls = i + 1;
5039
5040	/* Calculate the number of rcu_nodes at each level of the tree. */
5041	for (i = 0; i < rcu_num_lvls; i++) {
5042		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
5043		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
5044	}
 
 
 
 
 
 
5045
5046	/* Calculate the total number of rcu_node structures. */
5047	rcu_num_nodes = 0;
5048	for (i = 0; i < rcu_num_lvls; i++)
5049		rcu_num_nodes += num_rcu_lvl[i];
5050}
5051
5052/*
5053 * Dump out the structure of the rcu_node combining tree associated
5054 * with the rcu_state structure.
5055 */
5056static void __init rcu_dump_rcu_node_tree(void)
5057{
5058	int level = 0;
5059	struct rcu_node *rnp;
5060
5061	pr_info("rcu_node tree layout dump\n");
5062	pr_info(" ");
5063	rcu_for_each_node_breadth_first(rnp) {
5064		if (rnp->level != level) {
5065			pr_cont("\n");
5066			pr_info(" ");
5067			level = rnp->level;
5068		}
5069		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
5070	}
5071	pr_cont("\n");
5072}
5073
5074struct workqueue_struct *rcu_gp_wq;
5075
5076static void __init kfree_rcu_batch_init(void)
5077{
5078	int cpu;
5079	int i, j;
5080	struct shrinker *kfree_rcu_shrinker;
5081
5082	/* Clamp it to [0:100] seconds interval. */
5083	if (rcu_delay_page_cache_fill_msec < 0 ||
5084		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
5085
5086		rcu_delay_page_cache_fill_msec =
5087			clamp(rcu_delay_page_cache_fill_msec, 0,
5088				(int) (100 * MSEC_PER_SEC));
5089
5090		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
5091			rcu_delay_page_cache_fill_msec);
5092	}
5093
5094	for_each_possible_cpu(cpu) {
5095		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
5096
5097		for (i = 0; i < KFREE_N_BATCHES; i++) {
5098			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
5099			krcp->krw_arr[i].krcp = krcp;
5100
5101			for (j = 0; j < FREE_N_CHANNELS; j++)
5102				INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
5103		}
5104
5105		for (i = 0; i < FREE_N_CHANNELS; i++)
5106			INIT_LIST_HEAD(&krcp->bulk_head[i]);
5107
5108		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
5109		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
5110		krcp->initialized = true;
5111	}
5112
5113	kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
5114	if (!kfree_rcu_shrinker) {
5115		pr_err("Failed to allocate kfree_rcu() shrinker!\n");
5116		return;
5117	}
5118
5119	kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
5120	kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
5121
5122	shrinker_register(kfree_rcu_shrinker);
5123}
5124
5125void __init rcu_init(void)
5126{
5127	int cpu = smp_processor_id();
5128
5129	rcu_early_boot_tests();
5130
5131	kfree_rcu_batch_init();
5132	rcu_bootup_announce();
5133	sanitize_kthread_prio();
5134	rcu_init_geometry();
5135	rcu_init_one();
5136	if (dump_tree)
5137		rcu_dump_rcu_node_tree();
5138	if (use_softirq)
5139		open_softirq(RCU_SOFTIRQ, rcu_core_si);
5140
5141	/*
5142	 * We don't need protection against CPU-hotplug here because
5143	 * this is called early in boot, before either interrupts
5144	 * or the scheduler are operational.
5145	 */
 
5146	pm_notifier(rcu_pm_notify, 0);
5147	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
5148	rcutree_prepare_cpu(cpu);
5149	rcutree_report_cpu_starting(cpu);
5150	rcutree_online_cpu(cpu);
5151
5152	/* Create workqueue for Tree SRCU and for expedited GPs. */
5153	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
5154	WARN_ON(!rcu_gp_wq);
5155	rcu_alloc_par_gp_wq();
5156
5157	/* Fill in default value for rcutree.qovld boot parameter. */
5158	/* -After- the rcu_node ->lock fields are initialized! */
5159	if (qovld < 0)
5160		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
5161	else
5162		qovld_calc = qovld;
5163
5164	// Kick-start in case any polled grace periods started early.
5165	(void)start_poll_synchronize_rcu_expedited();
5166
5167	rcu_test_sync_prims();
5168}
5169
5170#include "tree_stall.h"
5171#include "tree_exp.h"
5172#include "tree_nocb.h"
5173#include "tree_plugin.h"
v3.15
 
   1/*
   2 * Read-Copy Update mechanism for mutual exclusion
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, you can access it online at
  16 * http://www.gnu.org/licenses/gpl-2.0.html.
  17 *
  18 * Copyright IBM Corporation, 2008
  19 *
  20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  21 *	    Manfred Spraul <manfred@colorfullife.com>
  22 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
  23 *
  24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  26 *
  27 * For detailed explanation of Read-Copy Update mechanism see -
  28 *	Documentation/RCU
  29 */
 
 
 
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/init.h>
  33#include <linux/spinlock.h>
  34#include <linux/smp.h>
  35#include <linux/rcupdate.h>
  36#include <linux/interrupt.h>
  37#include <linux/sched.h>
 
  38#include <linux/nmi.h>
  39#include <linux/atomic.h>
  40#include <linux/bitops.h>
  41#include <linux/export.h>
  42#include <linux/completion.h>
 
  43#include <linux/moduleparam.h>
  44#include <linux/module.h>
 
  45#include <linux/percpu.h>
  46#include <linux/notifier.h>
  47#include <linux/cpu.h>
  48#include <linux/mutex.h>
  49#include <linux/time.h>
  50#include <linux/kernel_stat.h>
  51#include <linux/wait.h>
  52#include <linux/kthread.h>
 
  53#include <linux/prefetch.h>
  54#include <linux/delay.h>
  55#include <linux/stop_machine.h>
  56#include <linux/random.h>
  57#include <linux/ftrace_event.h>
  58#include <linux/suspend.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  59
  60#include "tree.h"
  61#include "rcu.h"
  62
  63MODULE_ALIAS("rcutree");
  64#ifdef MODULE_PARAM_PREFIX
  65#undef MODULE_PARAM_PREFIX
  66#endif
  67#define MODULE_PARAM_PREFIX "rcutree."
  68
  69/* Data structures. */
  70
  71static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
  72static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  73
  74/*
  75 * In order to export the rcu_state name to the tracing tools, it
  76 * needs to be added in the __tracepoint_string section.
  77 * This requires defining a separate variable tp_<sname>_varname
  78 * that points to the string being used, and this will allow
  79 * the tracing userspace tools to be able to decipher the string
  80 * address to the matching string.
  81 */
  82#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
  83static char sname##_varname[] = #sname; \
  84static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
  85struct rcu_state sname##_state = { \
  86	.level = { &sname##_state.node[0] }, \
  87	.call = cr, \
  88	.fqs_state = RCU_GP_IDLE, \
  89	.gpnum = 0UL - 300UL, \
  90	.completed = 0UL - 300UL, \
  91	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
  92	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
  93	.orphan_donetail = &sname##_state.orphan_donelist, \
  94	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
  95	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
  96	.name = sname##_varname, \
  97	.abbr = sabbr, \
  98}; \
  99DEFINE_PER_CPU(struct rcu_data, sname##_data)
 100
 101RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 102RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 103
 104static struct rcu_state *rcu_state;
 105LIST_HEAD(rcu_struct_flavors);
 106
 107/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
 108static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
 109module_param(rcu_fanout_leaf, int, 0444);
 110int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 111static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
 112	NUM_RCU_LVL_0,
 113	NUM_RCU_LVL_1,
 114	NUM_RCU_LVL_2,
 115	NUM_RCU_LVL_3,
 116	NUM_RCU_LVL_4,
 117};
 118int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 119
 120/*
 121 * The rcu_scheduler_active variable transitions from zero to one just
 122 * before the first task is spawned.  So when this variable is zero, RCU
 123 * can assume that there is but one task, allowing RCU to (for example)
 124 * optimize synchronize_sched() to a simple barrier().  When this variable
 125 * is one, RCU must actually do all the hard work required to detect real
 126 * grace periods.  This variable is also used to suppress boot-time false
 127 * positives from lockdep-RCU error checking.
 
 
 
 128 */
 129int rcu_scheduler_active __read_mostly;
 130EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 131
 132/*
 133 * The rcu_scheduler_fully_active variable transitions from zero to one
 134 * during the early_initcall() processing, which is after the scheduler
 135 * is capable of creating new tasks.  So RCU processing (for example,
 136 * creating tasks for RCU priority boosting) must be delayed until after
 137 * rcu_scheduler_fully_active transitions from zero to one.  We also
 138 * currently delay invocation of any RCU callbacks until after this point.
 139 *
 140 * It might later prove better for people registering RCU callbacks during
 141 * early boot to take responsibility for these callbacks, but one step at
 142 * a time.
 143 */
 144static int rcu_scheduler_fully_active __read_mostly;
 145
 146#ifdef CONFIG_RCU_BOOST
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 147
 148/*
 149 * Control variables for per-CPU and per-rcu_node kthreads.  These
 150 * handle all flavors of RCU.
 
 
 151 */
 152static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 153DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 154DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 155DEFINE_PER_CPU(char, rcu_cpu_has_work);
 156
 157#endif /* #ifdef CONFIG_RCU_BOOST */
 
 
 
 
 
 
 
 
 
 158
 159static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 160static void invoke_rcu_core(void);
 161static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 
 
 162
 163/*
 164 * Track the rcutorture test sequence number and the update version
 165 * number within a given test.  The rcutorture_testseq is incremented
 166 * on every rcutorture module load and unload, so has an odd value
 167 * when a test is running.  The rcutorture_vernum is set to zero
 168 * when rcutorture starts and is incremented on each rcutorture update.
 169 * These variables enable correlating rcutorture output with the
 170 * RCU tracing information.
 171 */
 172unsigned long rcutorture_testseq;
 173unsigned long rcutorture_vernum;
 174
 175/*
 176 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 177 * permit this function to be invoked without holding the root rcu_node
 178 * structure's ->lock, but of course results can be subject to change.
 179 */
 180static int rcu_gp_in_progress(struct rcu_state *rsp)
 181{
 182	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
 183}
 184
 185/*
 186 * Note a quiescent state.  Because we do not need to know
 187 * how many quiescent states passed, just if there was at least
 188 * one since the start of the grace period, this just sets a flag.
 189 * The caller must have disabled preemption.
 190 */
 191void rcu_sched_qs(int cpu)
 192{
 193	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 194
 195	if (rdp->passed_quiesce == 0)
 196		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
 197	rdp->passed_quiesce = 1;
 198}
 199
 200void rcu_bh_qs(int cpu)
 201{
 202	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 203
 204	if (rdp->passed_quiesce == 0)
 205		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
 206	rdp->passed_quiesce = 1;
 207}
 208
 209/*
 210 * Note a context switch.  This is a quiescent state for RCU-sched,
 211 * and requires special handling for preemptible RCU.
 212 * The caller must have disabled preemption.
 
 
 
 
 
 213 */
 214void rcu_note_context_switch(int cpu)
 215{
 216	trace_rcu_utilization(TPS("Start context switch"));
 217	rcu_sched_qs(cpu);
 218	rcu_preempt_note_context_switch(cpu);
 219	trace_rcu_utilization(TPS("End context switch"));
 220}
 221EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 222
 223static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 224	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 225	.dynticks = ATOMIC_INIT(1),
 226#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 227	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
 228	.dynticks_idle = ATOMIC_INIT(1),
 229#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 230};
 231
 232static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
 233static long qhimark = 10000;	/* If this many pending, ignore blimit. */
 234static long qlowmark = 100;	/* Once only this many pending, use blimit. */
 235
 236module_param(blimit, long, 0444);
 237module_param(qhimark, long, 0444);
 238module_param(qlowmark, long, 0444);
 239
 240static ulong jiffies_till_first_fqs = ULONG_MAX;
 241static ulong jiffies_till_next_fqs = ULONG_MAX;
 242
 243module_param(jiffies_till_first_fqs, ulong, 0644);
 244module_param(jiffies_till_next_fqs, ulong, 0644);
 245
 246static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 247				  struct rcu_data *rdp);
 248static void force_qs_rnp(struct rcu_state *rsp,
 249			 int (*f)(struct rcu_data *rsp, bool *isidle,
 250				  unsigned long *maxj),
 251			 bool *isidle, unsigned long *maxj);
 252static void force_quiescent_state(struct rcu_state *rsp);
 253static int rcu_pending(int cpu);
 254
 255/*
 256 * Return the number of RCU-sched batches processed thus far for debug & stats.
 
 257 */
 258long rcu_batches_completed_sched(void)
 259{
 260	return rcu_sched_state.completed;
 
 261}
 262EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 263
 264/*
 265 * Return the number of RCU BH batches processed thus far for debug & stats.
 
 266 */
 267long rcu_batches_completed_bh(void)
 268{
 269	return rcu_bh_state.completed;
 270}
 271EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 272
 273/*
 274 * Force a quiescent state for RCU BH.
 
 
 275 */
 276void rcu_bh_force_quiescent_state(void)
 277{
 278	force_quiescent_state(&rcu_bh_state);
 279}
 280EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 281
 282/*
 283 * Record the number of times rcutorture tests have been initiated and
 284 * terminated.  This information allows the debugfs tracing stats to be
 285 * correlated to the rcutorture messages, even when the rcutorture module
 286 * is being repeatedly loaded and unloaded.  In other words, we cannot
 287 * store this state in rcutorture itself.
 288 */
 289void rcutorture_record_test_transition(void)
 290{
 291	rcutorture_testseq++;
 292	rcutorture_vernum = 0;
 
 
 
 
 
 
 
 
 
 293}
 294EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
 295
 296/*
 297 * Record the number of writer passes through the current rcutorture test.
 298 * This is also used to correlate debugfs tracing stats with the rcutorture
 299 * messages.
 
 
 
 
 
 
 300 */
 301void rcutorture_record_progress(unsigned long vernum)
 302{
 303	rcutorture_vernum++;
 
 
 
 
 
 
 304}
 305EXPORT_SYMBOL_GPL(rcutorture_record_progress);
 306
 307/*
 308 * Force a quiescent state for RCU-sched.
 
 
 
 
 
 309 */
 310void rcu_sched_force_quiescent_state(void)
 311{
 312	force_quiescent_state(&rcu_sched_state);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 313}
 314EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 315
 316/*
 317 * Does the CPU have callbacks ready to be invoked?
 
 318 */
 319static int
 320cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 321{
 322	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
 323	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
 324}
 325
 326/*
 327 * Does the current CPU require a not-yet-started grace period?
 328 * The caller must have disabled interrupts to prevent races with
 329 * normal callback registry.
 
 330 */
 331static int
 332cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 333{
 334	int i;
 335
 336	if (rcu_gp_in_progress(rsp))
 337		return 0;  /* No, a grace period is already in progress. */
 338	if (rcu_nocb_needs_gp(rsp))
 339		return 1;  /* Yes, a no-CBs CPU needs one. */
 340	if (!rdp->nxttail[RCU_NEXT_TAIL])
 341		return 0;  /* No, this is a no-CBs (or offline) CPU. */
 342	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
 343		return 1;  /* Yes, this CPU has newly registered callbacks. */
 344	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
 345		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
 346		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
 347				 rdp->nxtcompleted[i]))
 348			return 1;  /* Yes, CBs for future grace period. */
 349	return 0; /* No grace period needed. */
 
 
 
 
 
 
 
 
 
 
 350}
 351
 352/*
 353 * Return the root node of the specified rcu_state structure.
 354 */
 355static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 356{
 357	return &rsp->node[0];
 
 
 
 
 
 
 
 358}
 359
 360/*
 361 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 362 *
 363 * If the new value of the ->dynticks_nesting counter now is zero,
 364 * we really have entered idle, and must do the appropriate accounting.
 365 * The caller must have disabled interrupts.
 366 */
 367static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 368				bool user)
 369{
 370	struct rcu_state *rsp;
 371	struct rcu_data *rdp;
 
 372
 373	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 374	if (!user && !is_idle_task(current)) {
 375		struct task_struct *idle __maybe_unused =
 376			idle_task(smp_processor_id());
 377
 378		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
 379		ftrace_dump(DUMP_ORIG);
 380		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 381			  current->pid, current->comm,
 382			  idle->pid, idle->comm); /* must be idle task! */
 383	}
 384	for_each_rcu_flavor(rsp) {
 385		rdp = this_cpu_ptr(rsp->rda);
 386		do_nocb_deferred_wakeup(rdp);
 387	}
 388	rcu_prepare_for_idle(smp_processor_id());
 389	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 390	smp_mb__before_atomic_inc();  /* See above. */
 391	atomic_inc(&rdtp->dynticks);
 392	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
 393	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 394
 395	/*
 396	 * It is illegal to enter an extended quiescent state while
 397	 * in an RCU read-side critical section.
 398	 */
 399	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
 400			   "Illegal idle entry in RCU read-side critical section.");
 401	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
 402			   "Illegal idle entry in RCU-bh read-side critical section.");
 403	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
 404			   "Illegal idle entry in RCU-sched read-side critical section.");
 405}
 406
 407/*
 408 * Enter an RCU extended quiescent state, which can be either the
 409 * idle loop or adaptive-tickless usermode execution.
 410 */
 411static void rcu_eqs_enter(bool user)
 412{
 413	long long oldval;
 414	struct rcu_dynticks *rdtp;
 415
 416	rdtp = this_cpu_ptr(&rcu_dynticks);
 417	oldval = rdtp->dynticks_nesting;
 418	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 419	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
 420		rdtp->dynticks_nesting = 0;
 421		rcu_eqs_enter_common(rdtp, oldval, user);
 422	} else {
 423		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
 424	}
 425}
 426
 427/**
 428 * rcu_idle_enter - inform RCU that current CPU is entering idle
 429 *
 430 * Enter idle mode, in other words, -leave- the mode in which RCU
 431 * read-side critical sections can occur.  (Though RCU read-side
 432 * critical sections can occur in irq handlers in idle, a possibility
 433 * handled by irq_enter() and irq_exit().)
 434 *
 435 * We crowbar the ->dynticks_nesting field to zero to allow for
 436 * the possibility of usermode upcalls having messed up our count
 437 * of interrupt nesting level during the prior busy period.
 438 */
 439void rcu_idle_enter(void)
 440{
 441	unsigned long flags;
 442
 443	local_irq_save(flags);
 444	rcu_eqs_enter(false);
 445	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
 446	local_irq_restore(flags);
 447}
 448EXPORT_SYMBOL_GPL(rcu_idle_enter);
 449
 450#ifdef CONFIG_RCU_USER_QS
 451/**
 452 * rcu_user_enter - inform RCU that we are resuming userspace.
 453 *
 454 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 455 * is permitted between this call and rcu_user_exit(). This way the
 456 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 457 * when the CPU runs in userspace.
 458 */
 459void rcu_user_enter(void)
 460{
 461	rcu_eqs_enter(1);
 462}
 463#endif /* CONFIG_RCU_USER_QS */
 464
 465/**
 466 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 467 *
 468 * Exit from an interrupt handler, which might possibly result in entering
 469 * idle mode, in other words, leaving the mode in which read-side critical
 470 * sections can occur.
 471 *
 472 * This code assumes that the idle loop never does anything that might
 473 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 474 * architecture violates this assumption, RCU will give you what you
 475 * deserve, good and hard.  But very infrequently and irreproducibly.
 476 *
 477 * Use things like work queues to work around this limitation.
 478 *
 479 * You have been warned.
 480 */
 481void rcu_irq_exit(void)
 482{
 483	unsigned long flags;
 484	long long oldval;
 485	struct rcu_dynticks *rdtp;
 486
 487	local_irq_save(flags);
 488	rdtp = this_cpu_ptr(&rcu_dynticks);
 489	oldval = rdtp->dynticks_nesting;
 490	rdtp->dynticks_nesting--;
 491	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
 492	if (rdtp->dynticks_nesting)
 493		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 494	else
 495		rcu_eqs_enter_common(rdtp, oldval, true);
 496	rcu_sysidle_enter(rdtp, 1);
 497	local_irq_restore(flags);
 498}
 499
 500/*
 501 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 502 *
 503 * If the new value of the ->dynticks_nesting counter was previously zero,
 504 * we really have exited idle, and must do the appropriate accounting.
 505 * The caller must have disabled interrupts.
 506 */
 507static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 508			       int user)
 509{
 510	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 511	atomic_inc(&rdtp->dynticks);
 512	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 513	smp_mb__after_atomic_inc();  /* See above. */
 514	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 515	rcu_cleanup_after_idle(smp_processor_id());
 516	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 517	if (!user && !is_idle_task(current)) {
 518		struct task_struct *idle __maybe_unused =
 519			idle_task(smp_processor_id());
 520
 521		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 522				  oldval, rdtp->dynticks_nesting);
 523		ftrace_dump(DUMP_ORIG);
 524		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 525			  current->pid, current->comm,
 526			  idle->pid, idle->comm); /* must be idle task! */
 527	}
 528}
 
 529
 
 530/*
 531 * Exit an RCU extended quiescent state, which can be either the
 532 * idle loop or adaptive-tickless usermode execution.
 533 */
 534static void rcu_eqs_exit(bool user)
 535{
 536	struct rcu_dynticks *rdtp;
 537	long long oldval;
 538
 539	rdtp = this_cpu_ptr(&rcu_dynticks);
 540	oldval = rdtp->dynticks_nesting;
 541	WARN_ON_ONCE(oldval < 0);
 542	if (oldval & DYNTICK_TASK_NEST_MASK) {
 543		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 544	} else {
 545		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 546		rcu_eqs_exit_common(rdtp, oldval, user);
 547	}
 548}
 549
 550/**
 551 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 552 *
 553 * Exit idle mode, in other words, -enter- the mode in which RCU
 554 * read-side critical sections can occur.
 555 *
 556 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 557 * allow for the possibility of usermode upcalls messing up our count
 558 * of interrupt nesting level during the busy period that is just
 559 * now starting.
 560 */
 561void rcu_idle_exit(void)
 562{
 563	unsigned long flags;
 
 
 
 564
 565	local_irq_save(flags);
 566	rcu_eqs_exit(false);
 567	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
 568	local_irq_restore(flags);
 569}
 570EXPORT_SYMBOL_GPL(rcu_idle_exit);
 571
 572#ifdef CONFIG_RCU_USER_QS
 573/**
 574 * rcu_user_exit - inform RCU that we are exiting userspace.
 575 *
 576 * Exit RCU idle mode while entering the kernel because it can
 577 * run a RCU read side critical section anytime.
 578 */
 579void rcu_user_exit(void)
 580{
 581	rcu_eqs_exit(1);
 582}
 583#endif /* CONFIG_RCU_USER_QS */
 584
 
 585/**
 586 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 587 *
 588 * Enter an interrupt handler, which might possibly result in exiting
 589 * idle mode, in other words, entering the mode in which read-side critical
 590 * sections can occur.
 591 *
 592 * Note that the Linux kernel is fully capable of entering an interrupt
 593 * handler that it never exits, for example when doing upcalls to
 594 * user mode!  This code assumes that the idle loop never does upcalls to
 595 * user mode.  If your architecture does do upcalls from the idle loop (or
 596 * does anything else that results in unbalanced calls to the irq_enter()
 597 * and irq_exit() functions), RCU will give you what you deserve, good
 598 * and hard.  But very infrequently and irreproducibly.
 599 *
 600 * Use things like work queues to work around this limitation.
 601 *
 602 * You have been warned.
 603 */
 604void rcu_irq_enter(void)
 605{
 606	unsigned long flags;
 607	struct rcu_dynticks *rdtp;
 608	long long oldval;
 609
 610	local_irq_save(flags);
 611	rdtp = this_cpu_ptr(&rcu_dynticks);
 612	oldval = rdtp->dynticks_nesting;
 613	rdtp->dynticks_nesting++;
 614	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
 615	if (oldval)
 616		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 617	else
 618		rcu_eqs_exit_common(rdtp, oldval, true);
 619	rcu_sysidle_exit(rdtp, 1);
 620	local_irq_restore(flags);
 621}
 
 622
 
 623/**
 624 * rcu_nmi_enter - inform RCU of entry to NMI context
 625 *
 626 * If the CPU was idle with dynamic ticks active, and there is no
 627 * irq handler running, this updates rdtp->dynticks_nmi to let the
 628 * RCU grace-period handling know that the CPU is active.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 629 */
 630void rcu_nmi_enter(void)
 631{
 632	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 633
 634	if (rdtp->dynticks_nmi_nesting == 0 &&
 635	    (atomic_read(&rdtp->dynticks) & 0x1))
 636		return;
 637	rdtp->dynticks_nmi_nesting++;
 638	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
 639	atomic_inc(&rdtp->dynticks);
 640	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 641	smp_mb__after_atomic_inc();  /* See above. */
 642	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 643}
 644
 645/**
 646 * rcu_nmi_exit - inform RCU of exit from NMI context
 647 *
 648 * If the CPU was idle with dynamic ticks active, and there is no
 649 * irq handler running, this updates rdtp->dynticks_nmi to let the
 650 * RCU grace-period handling know that the CPU is no longer active.
 651 */
 652void rcu_nmi_exit(void)
 653{
 654	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 655
 656	if (rdtp->dynticks_nmi_nesting == 0 ||
 657	    --rdtp->dynticks_nmi_nesting != 0)
 
 
 
 658		return;
 659	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 660	smp_mb__before_atomic_inc();  /* See above. */
 661	atomic_inc(&rdtp->dynticks);
 662	smp_mb__after_atomic_inc();  /* Force delay to next write. */
 663	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 664}
 665
 666/**
 667 * __rcu_is_watching - are RCU read-side critical sections safe?
 668 *
 669 * Return true if RCU is watching the running CPU, which means that
 670 * this CPU can safely enter RCU read-side critical sections.  Unlike
 671 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
 672 * least disabled preemption.
 673 */
 674bool notrace __rcu_is_watching(void)
 675{
 676	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 
 
 
 677}
 678
 679/**
 680 * rcu_is_watching - see if RCU thinks that the current CPU is idle
 
 
 
 
 
 
 
 
 
 681 *
 682 * If the current CPU is in its idle loop and is neither in an interrupt
 683 * or NMI handler, return true.
 684 */
 685bool notrace rcu_is_watching(void)
 686{
 687	int ret;
 688
 689	preempt_disable();
 690	ret = __rcu_is_watching();
 691	preempt_enable();
 692	return ret;
 693}
 694EXPORT_SYMBOL_GPL(rcu_is_watching);
 695
 696#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 697
 698/*
 699 * Is the current CPU online?  Disable preemption to avoid false positives
 700 * that could otherwise happen due to the current CPU number being sampled,
 701 * this task being preempted, its old CPU being taken offline, resuming
 702 * on some other CPU, then determining that its old CPU is now offline.
 703 * It is OK to use RCU on an offline processor during initial boot, hence
 704 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 705 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 706 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 707 * offline to continue to use RCU for one jiffy after marking itself
 708 * offline in the cpu_online_mask.  This leniency is necessary given the
 709 * non-atomic nature of the online and offline processing, for example,
 710 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 711 * notifiers.
 712 *
 713 * This is also why RCU internally marks CPUs online during the
 714 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 715 *
 716 * Disable checking if in an NMI handler because we cannot safely report
 717 * errors from NMI handlers anyway.
 718 */
 719bool rcu_lockdep_current_cpu_online(void)
 720{
 721	struct rcu_data *rdp;
 722	struct rcu_node *rnp;
 723	bool ret;
 724
 725	if (in_nmi())
 726		return true;
 727	preempt_disable();
 728	rdp = this_cpu_ptr(&rcu_sched_data);
 729	rnp = rdp->mynode;
 730	ret = (rdp->grpmask & rnp->qsmaskinit) ||
 731	      !rcu_scheduler_fully_active;
 732	preempt_enable();
 733	return ret;
 734}
 735EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 736
 737#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
 738
 739/**
 740 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 741 *
 742 * If the current CPU is idle or running at a first-level (not nested)
 743 * interrupt from idle, return true.  The caller must have at least
 744 * disabled preemption.
 745 */
 746static int rcu_is_cpu_rrupt_from_idle(void)
 747{
 748	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 
 
 
 749}
 750
 751/*
 752 * Snapshot the specified CPU's dynticks counter so that we can later
 753 * credit them with an implicit quiescent state.  Return 1 if this CPU
 754 * is in dynticks idle mode, which is an extended quiescent state.
 755 */
 756static int dyntick_save_progress_counter(struct rcu_data *rdp,
 757					 bool *isidle, unsigned long *maxj)
 758{
 759	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
 760	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 761	return (rdp->dynticks_snap & 0x1) == 0;
 
 
 
 
 762}
 763
 764/*
 765 * This function really isn't for public consumption, but RCU is special in
 766 * that context switches can allow the state machine to make progress.
 
 
 
 
 
 
 767 */
 768extern void resched_cpu(int cpu);
 769
 770/*
 771 * Return true if the specified CPU has passed through a quiescent
 772 * state by virtue of being in or having passed through an dynticks
 773 * idle state since the last call to dyntick_save_progress_counter()
 774 * for this same CPU, or by virtue of having been offline.
 775 */
 776static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 777				    bool *isidle, unsigned long *maxj)
 778{
 779	unsigned int curr;
 780	unsigned int snap;
 781
 782	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
 783	snap = (unsigned int)rdp->dynticks_snap;
 784
 785	/*
 786	 * If the CPU passed through or entered a dynticks idle phase with
 787	 * no active irq/NMI handlers, then we can safely pretend that the CPU
 788	 * already acknowledged the request to pass through a quiescent
 789	 * state.  Either way, that CPU cannot possibly be in an RCU
 790	 * read-side critical section that started before the beginning
 791	 * of the current RCU grace period.
 792	 */
 793	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
 794		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 795		rdp->dynticks_fqs++;
 796		return 1;
 797	}
 798
 799	/*
 800	 * Check for the CPU being offline, but only if the grace period
 801	 * is old enough.  We don't need to worry about the CPU changing
 802	 * state: If we see it offline even once, it has been through a
 803	 * quiescent state.
 
 
 
 
 
 
 
 
 
 804	 *
 805	 * The reason for insisting that the grace period be at least
 806	 * one jiffy old is that CPUs that are not quite online and that
 807	 * have just gone offline can still execute RCU read-side critical
 808	 * sections.
 809	 */
 810	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
 811		return 0;  /* Grace period is not old enough. */
 812	barrier();
 813	if (cpu_is_offline(rdp->cpu)) {
 814		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
 815		rdp->offline_fqs++;
 816		return 1;
 817	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 818
 819	/*
 820	 * There is a possibility that a CPU in adaptive-ticks state
 821	 * might run in the kernel with the scheduling-clock tick disabled
 822	 * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
 823	 * force the CPU to restart the scheduling-clock tick in this
 824	 * CPU is in this state.
 825	 */
 826	rcu_kick_nohz_cpu(rdp->cpu);
 827
 828	/*
 829	 * Alternatively, the CPU might be running in the kernel
 830	 * for an extended period of time without a quiescent state.
 831	 * Attempt to force the CPU through the scheduler to gain the
 832	 * needed quiescent state, but only if the grace period has gone
 833	 * on for an uncommonly long time.  If there are many stuck CPUs,
 834	 * we will beat on the first one until it gets unstuck, then move
 835	 * to the next.  Only do this for the primary flavor of RCU.
 836	 */
 837	if (rdp->rsp == rcu_state &&
 838	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
 839		rdp->rsp->jiffies_resched += 5;
 840		resched_cpu(rdp->cpu);
 841	}
 842
 843	return 0;
 844}
 845
 846static void record_gp_stall_check_time(struct rcu_state *rsp)
 
 
 847{
 848	unsigned long j = jiffies;
 849	unsigned long j1;
 850
 851	rsp->gp_start = j;
 852	smp_wmb(); /* Record start time before stall time. */
 853	j1 = rcu_jiffies_till_stall_check();
 854	rsp->jiffies_stall = j + j1;
 855	rsp->jiffies_resched = j + j1 / 2;
 856}
 857
 858/*
 859 * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
 860 * for architectures that do not implement trigger_all_cpu_backtrace().
 861 * The NMI-triggered stack traces are more accurate because they are
 862 * printed by the target CPU.
 
 
 
 
 
 
 
 
 
 
 863 */
 864static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 
 865{
 866	int cpu;
 867	unsigned long flags;
 868	struct rcu_node *rnp;
 869
 870	rcu_for_each_leaf_node(rsp, rnp) {
 871		raw_spin_lock_irqsave(&rnp->lock, flags);
 872		if (rnp->qsmask != 0) {
 873			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
 874				if (rnp->qsmask & (1UL << cpu))
 875					dump_cpu_task(rnp->grplo + cpu);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 876		}
 877		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 
 
 878	}
 879}
 880
 881static void print_other_cpu_stall(struct rcu_state *rsp)
 882{
 883	int cpu;
 884	long delta;
 885	unsigned long flags;
 886	int ndetected = 0;
 887	struct rcu_node *rnp = rcu_get_root(rsp);
 888	long totqlen = 0;
 889
 890	/* Only let one CPU complain about others per time interval. */
 891
 892	raw_spin_lock_irqsave(&rnp->lock, flags);
 893	delta = jiffies - rsp->jiffies_stall;
 894	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 895		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 896		return;
 897	}
 898	rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 899	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 900
 901	/*
 902	 * OK, time to rat on our buddy...
 903	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 904	 * RCU CPU stall warnings.
 905	 */
 906	pr_err("INFO: %s detected stalls on CPUs/tasks:",
 907	       rsp->name);
 908	print_cpu_stall_info_begin();
 909	rcu_for_each_leaf_node(rsp, rnp) {
 910		raw_spin_lock_irqsave(&rnp->lock, flags);
 911		ndetected += rcu_print_task_stall(rnp);
 912		if (rnp->qsmask != 0) {
 913			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
 914				if (rnp->qsmask & (1UL << cpu)) {
 915					print_cpu_stall_info(rsp,
 916							     rnp->grplo + cpu);
 917					ndetected++;
 918				}
 919		}
 920		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 921	}
 922
 923	/*
 924	 * Now rat on any tasks that got kicked up to the root rcu_node
 925	 * due to CPU offlining.
 926	 */
 927	rnp = rcu_get_root(rsp);
 928	raw_spin_lock_irqsave(&rnp->lock, flags);
 929	ndetected += rcu_print_task_stall(rnp);
 930	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 931
 932	print_cpu_stall_info_end();
 933	for_each_possible_cpu(cpu)
 934		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
 935	pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
 936	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
 937	       rsp->gpnum, rsp->completed, totqlen);
 938	if (ndetected == 0)
 939		pr_err("INFO: Stall ended before state dump start\n");
 940	else if (!trigger_all_cpu_backtrace())
 941		rcu_dump_cpu_stacks(rsp);
 942
 943	/* Complain about tasks blocking the grace period. */
 944
 945	rcu_print_detail_task_stall(rsp);
 946
 947	force_quiescent_state(rsp);  /* Kick them all. */
 948}
 949
 950/*
 951 * This function really isn't for public consumption, but RCU is special in
 952 * that context switches can allow the state machine to make progress.
 953 */
 954extern void resched_cpu(int cpu);
 
 
 
 955
 956static void print_cpu_stall(struct rcu_state *rsp)
 
 
 
 
 
 
 
 
 957{
 958	int cpu;
 959	unsigned long flags;
 960	struct rcu_node *rnp = rcu_get_root(rsp);
 961	long totqlen = 0;
 962
 963	/*
 964	 * OK, time to rat on ourselves...
 965	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 966	 * RCU CPU stall warnings.
 967	 */
 968	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 969	print_cpu_stall_info_begin();
 970	print_cpu_stall_info(rsp, smp_processor_id());
 971	print_cpu_stall_info_end();
 972	for_each_possible_cpu(cpu)
 973		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
 974	pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
 975		jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
 976	if (!trigger_all_cpu_backtrace())
 977		dump_stack();
 978
 979	raw_spin_lock_irqsave(&rnp->lock, flags);
 980	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
 981		rsp->jiffies_stall = jiffies +
 982				     3 * rcu_jiffies_till_stall_check() + 3;
 983	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 984
 985	/*
 986	 * Attempt to revive the RCU machinery by forcing a context switch.
 987	 *
 988	 * A context switch would normally allow the RCU state machine to make
 989	 * progress and it could be we're stuck in kernel space without context
 990	 * switches for an entirely unreasonable amount of time.
 991	 */
 992	resched_cpu(smp_processor_id());
 993}
 994
 995static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 996{
 997	unsigned long completed;
 998	unsigned long gpnum;
 999	unsigned long gps;
1000	unsigned long j;
1001	unsigned long js;
1002	struct rcu_node *rnp;
1003
1004	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1005		return;
1006	j = jiffies;
1007
1008	/*
1009	 * Lots of memory barriers to reject false positives.
1010	 *
1011	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1012	 * then rsp->gp_start, and finally rsp->completed.  These values
1013	 * are updated in the opposite order with memory barriers (or
1014	 * equivalent) during grace-period initialization and cleanup.
1015	 * Now, a false positive can occur if we get an new value of
1016	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
1017	 * the memory barriers, the only way that this can happen is if one
1018	 * grace period ends and another starts between these two fetches.
1019	 * Detect this by comparing rsp->completed with the previous fetch
1020	 * from rsp->gpnum.
1021	 *
1022	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1023	 * and rsp->gp_start suffice to forestall false positives.
1024	 */
1025	gpnum = ACCESS_ONCE(rsp->gpnum);
1026	smp_rmb(); /* Pick up ->gpnum first... */
1027	js = ACCESS_ONCE(rsp->jiffies_stall);
1028	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1029	gps = ACCESS_ONCE(rsp->gp_start);
1030	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1031	completed = ACCESS_ONCE(rsp->completed);
1032	if (ULONG_CMP_GE(completed, gpnum) ||
1033	    ULONG_CMP_LT(j, js) ||
1034	    ULONG_CMP_GE(gps, js))
1035		return; /* No stall or GP completed since entering function. */
1036	rnp = rdp->mynode;
1037	if (rcu_gp_in_progress(rsp) &&
1038	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
1039
1040		/* We haven't checked in, so go dump stack. */
1041		print_cpu_stall(rsp);
1042
1043	} else if (rcu_gp_in_progress(rsp) &&
1044		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1045
1046		/* They had a few time units to dump stack, so complain. */
1047		print_other_cpu_stall(rsp);
 
1048	}
1049}
1050
1051/**
1052 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 
 
 
 
 
 
1053 *
1054 * Set the stall-warning timeout way off into the future, thus preventing
1055 * any RCU CPU stall-warning messages from appearing in the current set of
1056 * RCU grace periods.
1057 *
1058 * The caller must disable hard irqs.
1059 */
1060void rcu_cpu_stall_reset(void)
1061{
1062	struct rcu_state *rsp;
1063
1064	for_each_rcu_flavor(rsp)
1065		rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
1066}
1067
1068/*
1069 * Initialize the specified rcu_data structure's callback list to empty.
1070 */
1071static void init_callback_list(struct rcu_data *rdp)
1072{
1073	int i;
1074
1075	if (init_nocb_callback_list(rdp))
1076		return;
1077	rdp->nxtlist = NULL;
1078	for (i = 0; i < RCU_NEXT_SIZE; i++)
1079		rdp->nxttail[i] = &rdp->nxtlist;
1080}
1081
1082/*
1083 * Determine the value that ->completed will have at the end of the
1084 * next subsequent grace period.  This is used to tag callbacks so that
1085 * a CPU can invoke callbacks in a timely fashion even if that CPU has
1086 * been dyntick-idle for an extended period with callbacks under the
1087 * influence of RCU_FAST_NO_HZ.
 
 
 
1088 *
1089 * The caller must hold rnp->lock with interrupts disabled.
1090 */
1091static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1092				       struct rcu_node *rnp)
1093{
1094	/*
1095	 * If RCU is idle, we just wait for the next grace period.
1096	 * But we can only be sure that RCU is idle if we are looking
1097	 * at the root rcu_node structure -- otherwise, a new grace
1098	 * period might have started, but just not yet gotten around
1099	 * to initializing the current non-root rcu_node structure.
1100	 */
1101	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1102		return rnp->completed + 1;
1103
1104	/*
1105	 * Otherwise, wait for a possible partial grace period and
1106	 * then the subsequent full grace period.
1107	 */
1108	return rnp->completed + 2;
1109}
1110
1111/*
1112 * Trace-event helper function for rcu_start_future_gp() and
1113 * rcu_nocb_wait_gp().
1114 */
1115static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1116				unsigned long c, const char *s)
1117{
1118	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1119				      rnp->completed, c, rnp->level,
1120				      rnp->grplo, rnp->grphi, s);
1121}
1122
1123/*
1124 * Start some future grace period, as needed to handle newly arrived
1125 * callbacks.  The required future grace periods are recorded in each
1126 * rcu_node structure's ->need_future_gp field.
1127 *
1128 * The caller must hold the specified rcu_node structure's ->lock.
1129 */
1130static unsigned long __maybe_unused
1131rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
1132{
1133	unsigned long c;
1134	int i;
1135	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1136
1137	/*
1138	 * Pick up grace-period number for new callbacks.  If this
1139	 * grace period is already marked as needed, return to the caller.
 
 
 
 
 
 
1140	 */
1141	c = rcu_cbs_completed(rdp->rsp, rnp);
1142	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1143	if (rnp->need_future_gp[c & 0x1]) {
1144		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1145		return c;
1146	}
1147
1148	/*
1149	 * If either this rcu_node structure or the root rcu_node structure
1150	 * believe that a grace period is in progress, then we must wait
1151	 * for the one following, which is in "c".  Because our request
1152	 * will be noticed at the end of the current grace period, we don't
1153	 * need to explicitly start one.
1154	 */
1155	if (rnp->gpnum != rnp->completed ||
1156	    ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
1157		rnp->need_future_gp[c & 0x1]++;
1158		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1159		return c;
1160	}
1161
1162	/*
1163	 * There might be no grace period in progress.  If we don't already
1164	 * hold it, acquire the root rcu_node structure's lock in order to
1165	 * start one (if needed).
1166	 */
1167	if (rnp != rnp_root) {
1168		raw_spin_lock(&rnp_root->lock);
1169		smp_mb__after_unlock_lock();
1170	}
1171
1172	/*
1173	 * Get a new grace-period number.  If there really is no grace
1174	 * period in progress, it will be smaller than the one we obtained
1175	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
1176	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1177	 */
1178	c = rcu_cbs_completed(rdp->rsp, rnp_root);
1179	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1180		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1181			rdp->nxtcompleted[i] = c;
1182
1183	/*
1184	 * If the needed for the required grace period is already
1185	 * recorded, trace and leave.
1186	 */
1187	if (rnp_root->need_future_gp[c & 0x1]) {
1188		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1189		goto unlock_out;
1190	}
1191
1192	/* Record the need for the future grace period. */
1193	rnp_root->need_future_gp[c & 0x1]++;
1194
1195	/* If a grace period is not already in progress, start one. */
1196	if (rnp_root->gpnum != rnp_root->completed) {
1197		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1198	} else {
1199		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1200		rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1201	}
1202unlock_out:
1203	if (rnp != rnp_root)
1204		raw_spin_unlock(&rnp_root->lock);
1205	return c;
1206}
1207
1208/*
1209 * Clean up any old requests for the just-ended grace period.  Also return
1210 * whether any additional grace periods have been requested.  Also invoke
1211 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1212 * waiting for this grace period to complete.
1213 */
1214static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1215{
1216	int c = rnp->completed;
1217	int needmore;
1218	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1219
1220	rcu_nocb_gp_cleanup(rsp, rnp);
1221	rnp->need_future_gp[c & 0x1] = 0;
1222	needmore = rnp->need_future_gp[(c + 1) & 0x1];
1223	trace_rcu_future_gp(rnp, rdp, c,
1224			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1225	return needmore;
1226}
1227
1228/*
1229 * If there is room, assign a ->completed number to any callbacks on
1230 * this CPU that have not already been assigned.  Also accelerate any
1231 * callbacks that were previously assigned a ->completed number that has
1232 * since proven to be too conservative, which can happen if callbacks get
1233 * assigned a ->completed number while RCU is idle, but with reference to
1234 * a non-root rcu_node structure.  This function is idempotent, so it does
1235 * not hurt to call it repeatedly.
1236 *
1237 * The caller must hold rnp->lock with interrupts disabled.
1238 */
1239static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1240			       struct rcu_data *rdp)
1241{
1242	unsigned long c;
1243	int i;
1244
1245	/* If the CPU has no callbacks, nothing to do. */
1246	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1247		return;
1248
1249	/*
1250	 * Starting from the sublist containing the callbacks most
1251	 * recently assigned a ->completed number and working down, find the
1252	 * first sublist that is not assignable to an upcoming grace period.
1253	 * Such a sublist has something in it (first two tests) and has
1254	 * a ->completed number assigned that will complete sooner than
1255	 * the ->completed number for newly arrived callbacks (last test).
1256	 *
1257	 * The key point is that any later sublist can be assigned the
1258	 * same ->completed number as the newly arrived callbacks, which
1259	 * means that the callbacks in any of these later sublist can be
1260	 * grouped into a single sublist, whether or not they have already
1261	 * been assigned a ->completed number.
1262	 */
1263	c = rcu_cbs_completed(rsp, rnp);
1264	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1265		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1266		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1267			break;
1268
1269	/*
1270	 * If there are no sublist for unassigned callbacks, leave.
1271	 * At the same time, advance "i" one sublist, so that "i" will
1272	 * index into the sublist where all the remaining callbacks should
1273	 * be grouped into.
1274	 */
1275	if (++i >= RCU_NEXT_TAIL)
1276		return;
1277
1278	/*
1279	 * Assign all subsequent callbacks' ->completed number to the next
1280	 * full grace period and group them all in the sublist initially
1281	 * indexed by "i".
1282	 */
1283	for (; i <= RCU_NEXT_TAIL; i++) {
1284		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1285		rdp->nxtcompleted[i] = c;
1286	}
1287	/* Record any needed additional grace periods. */
1288	rcu_start_future_gp(rnp, rdp);
1289
1290	/* Trace depending on how much we were able to accelerate. */
1291	if (!*rdp->nxttail[RCU_WAIT_TAIL])
1292		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1293	else
1294		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1295}
1296
1297/*
1298 * Move any callbacks whose grace period has completed to the
1299 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1300 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1301 * sublist.  This function is idempotent, so it does not hurt to
1302 * invoke it repeatedly.  As long as it is not invoked -too- often...
 
1303 *
1304 * The caller must hold rnp->lock with interrupts disabled.
1305 */
1306static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1307			    struct rcu_data *rdp)
1308{
1309	int i, j;
 
1310
1311	/* If the CPU has no callbacks, nothing to do. */
1312	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1313		return;
1314
1315	/*
1316	 * Find all callbacks whose ->completed numbers indicate that they
1317	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1318	 */
1319	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1320		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1321			break;
1322		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1323	}
1324	/* Clean up any sublist tail pointers that were misordered above. */
1325	for (j = RCU_WAIT_TAIL; j < i; j++)
1326		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1327
1328	/* Copy down callbacks to fill in empty sublists. */
1329	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1330		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1331			break;
1332		rdp->nxttail[j] = rdp->nxttail[i];
1333		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1334	}
1335
1336	/* Classify any remaining callbacks. */
1337	rcu_accelerate_cbs(rsp, rnp, rdp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1338}
1339
1340/*
1341 * Update CPU-local rcu_data state to record the beginnings and ends of
1342 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1343 * structure corresponding to the current CPU, and must have irqs disabled.
 
1344 */
1345static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
1346{
1347	/* Handle the ends of any preceding grace periods first. */
1348	if (rdp->completed == rnp->completed) {
 
 
 
1349
1350		/* No grace period end, so just accelerate recent callbacks. */
1351		rcu_accelerate_cbs(rsp, rnp, rdp);
1352
 
 
 
 
 
 
 
1353	} else {
1354
1355		/* Advance callbacks. */
1356		rcu_advance_cbs(rsp, rnp, rdp);
1357
1358		/* Remember that we saw this grace-period completion. */
1359		rdp->completed = rnp->completed;
1360		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1361	}
1362
1363	if (rdp->gpnum != rnp->gpnum) {
 
 
1364		/*
1365		 * If the current grace period is waiting for this CPU,
1366		 * set up to detect a quiescent state, otherwise don't
1367		 * go looking for one.
1368		 */
1369		rdp->gpnum = rnp->gpnum;
1370		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1371		rdp->passed_quiesce = 0;
1372		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1373		zero_cpu_stall_ticks(rdp);
1374	}
 
 
 
 
 
 
 
 
1375}
1376
1377static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1378{
1379	unsigned long flags;
 
1380	struct rcu_node *rnp;
1381
1382	local_irq_save(flags);
1383	rnp = rdp->mynode;
1384	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1385	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
1386	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1387		local_irq_restore(flags);
1388		return;
1389	}
1390	smp_mb__after_unlock_lock();
1391	__note_gp_changes(rsp, rnp, rdp);
1392	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1393}
1394
1395/*
1396 * Initialize a new grace period.  Return 0 if no grace period required.
 
1397 */
1398static int rcu_gp_init(struct rcu_state *rsp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1399{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1400	struct rcu_data *rdp;
1401	struct rcu_node *rnp = rcu_get_root(rsp);
1402
1403	rcu_bind_gp_kthread();
1404	raw_spin_lock_irq(&rnp->lock);
1405	smp_mb__after_unlock_lock();
1406	if (rsp->gp_flags == 0) {
1407		/* Spurious wakeup, tell caller to go back to sleep.  */
1408		raw_spin_unlock_irq(&rnp->lock);
1409		return 0;
1410	}
1411	rsp->gp_flags = 0; /* Clear all flags: New grace period. */
1412
1413	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1414		/*
1415		 * Grace period already in progress, don't start another.
1416		 * Not supposed to be able to happen.
1417		 */
1418		raw_spin_unlock_irq(&rnp->lock);
1419		return 0;
1420	}
1421
1422	/* Advance to a new grace period and initialize state. */
1423	record_gp_stall_check_time(rsp);
1424	/* Record GP times before starting GP, hence smp_store_release(). */
1425	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1426	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1427	raw_spin_unlock_irq(&rnp->lock);
1428
1429	/* Exclude any concurrent CPU-hotplug operations. */
1430	mutex_lock(&rsp->onoff_mutex);
1431	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1432
1433	/*
1434	 * Set the quiescent-state-needed bits in all the rcu_node
1435	 * structures for all currently online CPUs in breadth-first order,
1436	 * starting from the root rcu_node structure, relying on the layout
1437	 * of the tree within the rsp->node[] array.  Note that other CPUs
1438	 * will access only the leaves of the hierarchy, thus seeing that no
1439	 * grace period is in progress, at least until the corresponding
1440	 * leaf node has been initialized.  In addition, we have excluded
1441	 * CPU-hotplug operations.
1442	 *
1443	 * The grace period cannot complete until the initialization
1444	 * process finishes, because this kthread handles both.
1445	 */
1446	rcu_for_each_node_breadth_first(rsp, rnp) {
1447		raw_spin_lock_irq(&rnp->lock);
1448		smp_mb__after_unlock_lock();
1449		rdp = this_cpu_ptr(rsp->rda);
 
1450		rcu_preempt_check_blocked_tasks(rnp);
1451		rnp->qsmask = rnp->qsmaskinit;
1452		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
1453		WARN_ON_ONCE(rnp->completed != rsp->completed);
1454		ACCESS_ONCE(rnp->completed) = rsp->completed;
1455		if (rnp == rdp->mynode)
1456			__note_gp_changes(rsp, rnp, rdp);
1457		rcu_preempt_boost_start_gp(rnp);
1458		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1459					    rnp->level, rnp->grplo,
1460					    rnp->grphi, rnp->qsmask);
1461		raw_spin_unlock_irq(&rnp->lock);
1462#ifdef CONFIG_PROVE_RCU_DELAY
1463		if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
1464		    system_state == SYSTEM_RUNNING)
1465			udelay(200);
1466#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
1467		cond_resched();
 
 
1468	}
1469
1470	mutex_unlock(&rsp->onoff_mutex);
1471	return 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1472}
1473
1474/*
1475 * Do one round of quiescent-state forcing.
1476 */
1477static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1478{
1479	int fqs_state = fqs_state_in;
1480	bool isidle = false;
1481	unsigned long maxj;
1482	struct rcu_node *rnp = rcu_get_root(rsp);
 
1483
1484	rsp->n_force_qs++;
1485	if (fqs_state == RCU_SAVE_DYNTICK) {
 
 
 
 
 
 
 
 
 
1486		/* Collect dyntick-idle snapshots. */
1487		if (is_sysidle_rcu_state(rsp)) {
1488			isidle = 1;
1489			maxj = jiffies - ULONG_MAX / 4;
1490		}
1491		force_qs_rnp(rsp, dyntick_save_progress_counter,
1492			     &isidle, &maxj);
1493		rcu_sysidle_report_gp(rsp, isidle, maxj);
1494		fqs_state = RCU_FORCE_QS;
1495	} else {
1496		/* Handle dyntick-idle and offline CPUs. */
1497		isidle = 0;
1498		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1499	}
1500	/* Clear flag to prevent immediate re-entry. */
1501	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1502		raw_spin_lock_irq(&rnp->lock);
1503		smp_mb__after_unlock_lock();
1504		rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
1505		raw_spin_unlock_irq(&rnp->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1506	}
1507	return fqs_state;
1508}
1509
1510/*
1511 * Clean up after the old grace period.
1512 */
1513static void rcu_gp_cleanup(struct rcu_state *rsp)
1514{
 
 
1515	unsigned long gp_duration;
1516	int nocb = 0;
 
1517	struct rcu_data *rdp;
1518	struct rcu_node *rnp = rcu_get_root(rsp);
 
1519
1520	raw_spin_lock_irq(&rnp->lock);
1521	smp_mb__after_unlock_lock();
1522	gp_duration = jiffies - rsp->gp_start;
1523	if (gp_duration > rsp->gp_max)
1524		rsp->gp_max = gp_duration;
 
1525
1526	/*
1527	 * We know the grace period is complete, but to everyone else
1528	 * it appears to still be ongoing.  But it is also the case
1529	 * that to everyone else it looks like there is nothing that
1530	 * they can do to advance the grace period.  It is therefore
1531	 * safe for us to drop the lock in order to mark the grace
1532	 * period as completed in all of the rcu_node structures.
1533	 */
1534	raw_spin_unlock_irq(&rnp->lock);
 
1535
1536	/*
1537	 * Propagate new ->completed value to rcu_node structures so
1538	 * that other CPUs don't have to wait until the start of the next
1539	 * grace period to process their callbacks.  This also avoids
1540	 * some nasty RCU grace-period initialization races by forcing
1541	 * the end of the current grace period to be completely recorded in
1542	 * all of the rcu_node structures before the beginning of the next
1543	 * grace period is recorded in any of the rcu_node structures.
1544	 */
1545	rcu_for_each_node_breadth_first(rsp, rnp) {
1546		raw_spin_lock_irq(&rnp->lock);
1547		smp_mb__after_unlock_lock();
1548		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
1549		rdp = this_cpu_ptr(rsp->rda);
 
 
 
 
 
 
1550		if (rnp == rdp->mynode)
1551			__note_gp_changes(rsp, rnp, rdp);
1552		/* smp_mb() provided by prior unlock-lock pair. */
1553		nocb += rcu_future_gp_cleanup(rsp, rnp);
1554		raw_spin_unlock_irq(&rnp->lock);
1555		cond_resched();
1556	}
1557	rnp = rcu_get_root(rsp);
1558	raw_spin_lock_irq(&rnp->lock);
1559	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
1560	rcu_nocb_gp_set(rnp, nocb);
1561
1562	/* Declare grace period done. */
1563	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
1564	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
1565	rsp->fqs_state = RCU_GP_IDLE;
1566	rdp = this_cpu_ptr(rsp->rda);
1567	rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
1568	if (cpu_needs_another_gp(rsp, rdp)) {
1569		rsp->gp_flags = RCU_GP_FLAG_INIT;
1570		trace_rcu_grace_period(rsp->name,
1571				       ACCESS_ONCE(rsp->gpnum),
1572				       TPS("newreq"));
1573	}
1574	raw_spin_unlock_irq(&rnp->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1575}
1576
1577/*
1578 * Body of kthread that handles grace periods.
1579 */
1580static int __noreturn rcu_gp_kthread(void *arg)
1581{
1582	int fqs_state;
1583	int gf;
1584	unsigned long j;
1585	int ret;
1586	struct rcu_state *rsp = arg;
1587	struct rcu_node *rnp = rcu_get_root(rsp);
1588
1589	for (;;) {
1590
1591		/* Handle grace-period start. */
1592		for (;;) {
1593			trace_rcu_grace_period(rsp->name,
1594					       ACCESS_ONCE(rsp->gpnum),
1595					       TPS("reqwait"));
1596			wait_event_interruptible(rsp->gp_wq,
1597						 ACCESS_ONCE(rsp->gp_flags) &
1598						 RCU_GP_FLAG_INIT);
 
 
 
1599			/* Locking provides needed memory barrier. */
1600			if (rcu_gp_init(rsp))
1601				break;
1602			cond_resched();
1603			flush_signals(current);
1604			trace_rcu_grace_period(rsp->name,
1605					       ACCESS_ONCE(rsp->gpnum),
1606					       TPS("reqwaitsig"));
1607		}
1608
1609		/* Handle quiescent-state forcing. */
1610		fqs_state = RCU_SAVE_DYNTICK;
1611		j = jiffies_till_first_fqs;
1612		if (j > HZ) {
1613			j = HZ;
1614			jiffies_till_first_fqs = HZ;
1615		}
1616		ret = 0;
1617		for (;;) {
1618			if (!ret)
1619				rsp->jiffies_force_qs = jiffies + j;
1620			trace_rcu_grace_period(rsp->name,
1621					       ACCESS_ONCE(rsp->gpnum),
1622					       TPS("fqswait"));
1623			ret = wait_event_interruptible_timeout(rsp->gp_wq,
1624					((gf = ACCESS_ONCE(rsp->gp_flags)) &
1625					 RCU_GP_FLAG_FQS) ||
1626					(!ACCESS_ONCE(rnp->qsmask) &&
1627					 !rcu_preempt_blocked_readers_cgp(rnp)),
1628					j);
1629			/* Locking provides needed memory barriers. */
1630			/* If grace period done, leave loop. */
1631			if (!ACCESS_ONCE(rnp->qsmask) &&
1632			    !rcu_preempt_blocked_readers_cgp(rnp))
1633				break;
1634			/* If time for quiescent-state forcing, do it. */
1635			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
1636			    (gf & RCU_GP_FLAG_FQS)) {
1637				trace_rcu_grace_period(rsp->name,
1638						       ACCESS_ONCE(rsp->gpnum),
1639						       TPS("fqsstart"));
1640				fqs_state = rcu_gp_fqs(rsp, fqs_state);
1641				trace_rcu_grace_period(rsp->name,
1642						       ACCESS_ONCE(rsp->gpnum),
1643						       TPS("fqsend"));
1644				cond_resched();
1645			} else {
1646				/* Deal with stray signal. */
1647				cond_resched();
1648				flush_signals(current);
1649				trace_rcu_grace_period(rsp->name,
1650						       ACCESS_ONCE(rsp->gpnum),
1651						       TPS("fqswaitsig"));
1652			}
1653			j = jiffies_till_next_fqs;
1654			if (j > HZ) {
1655				j = HZ;
1656				jiffies_till_next_fqs = HZ;
1657			} else if (j < 1) {
1658				j = 1;
1659				jiffies_till_next_fqs = 1;
1660			}
1661		}
1662
1663		/* Handle grace-period end. */
1664		rcu_gp_cleanup(rsp);
 
 
1665	}
1666}
1667
1668static void rsp_wakeup(struct irq_work *work)
1669{
1670	struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
1671
1672	/* Wake up rcu_gp_kthread() to start the grace period. */
1673	wake_up(&rsp->gp_wq);
1674}
1675
1676/*
1677 * Start a new RCU grace period if warranted, re-initializing the hierarchy
1678 * in preparation for detecting the next grace period.  The caller must hold
1679 * the root node's ->lock and hard irqs must be disabled.
1680 *
1681 * Note that it is legal for a dying CPU (which is marked as offline) to
1682 * invoke this function.  This can happen when the dying CPU reports its
1683 * quiescent state.
1684 */
1685static void
1686rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1687		      struct rcu_data *rdp)
1688{
1689	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
1690		/*
1691		 * Either we have not yet spawned the grace-period
1692		 * task, this CPU does not need another grace period,
1693		 * or a grace period is already in progress.
1694		 * Either way, don't start a new grace period.
1695		 */
1696		return;
1697	}
1698	rsp->gp_flags = RCU_GP_FLAG_INIT;
1699	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
1700			       TPS("newreq"));
1701
1702	/*
1703	 * We can't do wakeups while holding the rnp->lock, as that
1704	 * could cause possible deadlocks with the rq->lock. Defer
1705	 * the wakeup to interrupt context.  And don't bother waking
1706	 * up the running kthread.
1707	 */
1708	if (current != rsp->gp_kthread)
1709		irq_work_queue(&rsp->wakeup_work);
1710}
1711
1712/*
1713 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
1714 * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
1715 * is invoked indirectly from rcu_advance_cbs(), which would result in
1716 * endless recursion -- or would do so if it wasn't for the self-deadlock
1717 * that is encountered beforehand.
1718 */
1719static void
1720rcu_start_gp(struct rcu_state *rsp)
1721{
1722	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1723	struct rcu_node *rnp = rcu_get_root(rsp);
1724
1725	/*
1726	 * If there is no grace period in progress right now, any
1727	 * callbacks we have up to this point will be satisfied by the
1728	 * next grace period.  Also, advancing the callbacks reduces the
1729	 * probability of false positives from cpu_needs_another_gp()
1730	 * resulting in pointless grace periods.  So, advance callbacks
1731	 * then start the grace period!
1732	 */
1733	rcu_advance_cbs(rsp, rnp, rdp);
1734	rcu_start_gp_advanced(rsp, rnp, rdp);
1735}
1736
1737/*
1738 * Report a full set of quiescent states to the specified rcu_state
1739 * data structure.  This involves cleaning up after the prior grace
1740 * period and letting rcu_start_gp() start up the next grace period
1741 * if one is needed.  Note that the caller must hold rnp->lock, which
1742 * is released before return.
1743 */
1744static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1745	__releases(rcu_get_root(rsp)->lock)
1746{
1747	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1748	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
1749	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
1750}
1751
1752/*
1753 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1754 * Allows quiescent states for a group of CPUs to be reported at one go
1755 * to the specified rcu_node structure, though all the CPUs in the group
1756 * must be represented by the same rcu_node structure (which need not be
1757 * a leaf rcu_node structure, though it often will be).  That structure's
1758 * lock must be held upon entry, and it is released before return.
 
 
 
 
 
 
1759 */
1760static void
1761rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1762		  struct rcu_node *rnp, unsigned long flags)
1763	__releases(rnp->lock)
1764{
 
1765	struct rcu_node *rnp_c;
1766
 
 
1767	/* Walk up the rcu_node hierarchy. */
1768	for (;;) {
1769		if (!(rnp->qsmask & mask)) {
1770
1771			/* Our bit has already been cleared, so done. */
1772			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 
 
1773			return;
1774		}
1775		rnp->qsmask &= ~mask;
1776		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
 
 
 
1777						 mask, rnp->qsmask, rnp->level,
1778						 rnp->grplo, rnp->grphi,
1779						 !!rnp->gp_tasks);
1780		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1781
1782			/* Other bits still set at this level, so done. */
1783			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1784			return;
1785		}
 
1786		mask = rnp->grpmask;
1787		if (rnp->parent == NULL) {
1788
1789			/* No more levels.  Exit loop holding root lock. */
1790
1791			break;
1792		}
1793		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1794		rnp_c = rnp;
1795		rnp = rnp->parent;
1796		raw_spin_lock_irqsave(&rnp->lock, flags);
1797		smp_mb__after_unlock_lock();
1798		WARN_ON_ONCE(rnp_c->qsmask);
1799	}
1800
1801	/*
1802	 * Get here if we are the last CPU to pass through a quiescent
1803	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1804	 * to clean up and start the next grace period if one is needed.
1805	 */
1806	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1807}
1808
1809/*
1810 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1811 * structure.  This must be either called from the specified CPU, or
1812 * called when the specified CPU is known to be offline (and when it is
1813 * also known that no other CPU is concurrently trying to help the offline
1814 * CPU).  The lastcomp argument is used to make sure we are still in the
1815 * grace period of interest.  We don't want to end the current grace period
1816 * based on quiescent states detected in an earlier grace period!
1817 */
1818static void
1819rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
1820{
1821	unsigned long flags;
1822	unsigned long mask;
 
1823	struct rcu_node *rnp;
1824
 
1825	rnp = rdp->mynode;
1826	raw_spin_lock_irqsave(&rnp->lock, flags);
1827	smp_mb__after_unlock_lock();
1828	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
1829	    rnp->completed == rnp->gpnum) {
1830
1831		/*
1832		 * The grace period in which this quiescent state was
1833		 * recorded has ended, so don't report it upwards.
1834		 * We will instead need a new quiescent state that lies
1835		 * within the current grace period.
1836		 */
1837		rdp->passed_quiesce = 0;	/* need qs for new gp. */
1838		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1839		return;
1840	}
1841	mask = rdp->grpmask;
 
1842	if ((rnp->qsmask & mask) == 0) {
1843		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1844	} else {
1845		rdp->qs_pending = 0;
1846
1847		/*
1848		 * This GP can't end until cpu checks in, so all of our
1849		 * callbacks can be processed during the next GP.
 
 
1850		 */
1851		rcu_accelerate_cbs(rsp, rnp, rdp);
 
 
 
 
 
 
 
 
 
 
 
 
 
1852
1853		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
 
 
 
 
 
 
 
 
1854	}
1855}
1856
1857/*
1858 * Check to see if there is a new grace period of which this CPU
1859 * is not yet aware, and if so, set up local rcu_data state for it.
1860 * Otherwise, see if this CPU has just passed through its first
1861 * quiescent state for this grace period, and record that fact if so.
1862 */
1863static void
1864rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1865{
1866	/* Check for grace-period ends and beginnings. */
1867	note_gp_changes(rsp, rdp);
1868
1869	/*
1870	 * Does this CPU still need to do its part for current grace period?
1871	 * If no, return and let the other CPUs do their part as well.
1872	 */
1873	if (!rdp->qs_pending)
1874		return;
1875
1876	/*
1877	 * Was there a quiescent state since the beginning of the grace
1878	 * period? If no, then exit and wait for the next call.
1879	 */
1880	if (!rdp->passed_quiesce)
1881		return;
1882
1883	/*
1884	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1885	 * judge of that).
1886	 */
1887	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
1888}
1889
1890#ifdef CONFIG_HOTPLUG_CPU
1891
1892/*
1893 * Send the specified CPU's RCU callbacks to the orphanage.  The
1894 * specified CPU must be offline, and the caller must hold the
1895 * ->orphan_lock.
1896 */
1897static void
1898rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
1899			  struct rcu_node *rnp, struct rcu_data *rdp)
1900{
1901	/* No-CBs CPUs do not have orphanable callbacks. */
1902	if (rcu_is_nocb_cpu(rdp->cpu))
1903		return;
1904
1905	/*
1906	 * Orphan the callbacks.  First adjust the counts.  This is safe
1907	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
1908	 * cannot be running now.  Thus no memory barrier is required.
1909	 */
1910	if (rdp->nxtlist != NULL) {
1911		rsp->qlen_lazy += rdp->qlen_lazy;
1912		rsp->qlen += rdp->qlen;
1913		rdp->n_cbs_orphaned += rdp->qlen;
1914		rdp->qlen_lazy = 0;
1915		ACCESS_ONCE(rdp->qlen) = 0;
1916	}
1917
1918	/*
1919	 * Next, move those callbacks still needing a grace period to
1920	 * the orphanage, where some other CPU will pick them up.
1921	 * Some of the callbacks might have gone partway through a grace
1922	 * period, but that is too bad.  They get to start over because we
1923	 * cannot assume that grace periods are synchronized across CPUs.
1924	 * We don't bother updating the ->nxttail[] array yet, instead
1925	 * we just reset the whole thing later on.
1926	 */
1927	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
1928		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
1929		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
1930		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
1931	}
1932
1933	/*
1934	 * Then move the ready-to-invoke callbacks to the orphanage,
1935	 * where some other CPU will pick them up.  These will not be
1936	 * required to pass though another grace period: They are done.
1937	 */
1938	if (rdp->nxtlist != NULL) {
1939		*rsp->orphan_donetail = rdp->nxtlist;
1940		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
1941	}
1942
1943	/* Finally, initialize the rcu_data structure's list to empty.  */
1944	init_callback_list(rdp);
1945}
1946
1947/*
1948 * Adopt the RCU callbacks from the specified rcu_state structure's
1949 * orphanage.  The caller must hold the ->orphan_lock.
1950 */
1951static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
1952{
1953	int i;
1954	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
 
 
 
 
 
 
 
 
1955
1956	/* No-CBs CPUs are handled specially. */
1957	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
 
 
 
 
 
 
1958		return;
1959
1960	/* Do the accounting first. */
1961	rdp->qlen_lazy += rsp->qlen_lazy;
1962	rdp->qlen += rsp->qlen;
1963	rdp->n_cbs_adopted += rsp->qlen;
1964	if (rsp->qlen_lazy != rsp->qlen)
1965		rcu_idle_count_callbacks_posted();
1966	rsp->qlen_lazy = 0;
1967	rsp->qlen = 0;
1968
1969	/*
1970	 * We do not need a memory barrier here because the only way we
1971	 * can get here if there is an rcu_barrier() in flight is if
1972	 * we are the task doing the rcu_barrier().
1973	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1974
1975	/* First adopt the ready-to-invoke callbacks. */
1976	if (rsp->orphan_donelist != NULL) {
1977		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
1978		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
1979		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
1980			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
1981				rdp->nxttail[i] = rsp->orphan_donetail;
1982		rsp->orphan_donelist = NULL;
1983		rsp->orphan_donetail = &rsp->orphan_donelist;
1984	}
1985
1986	/* And then adopt the callbacks that still need a grace period. */
1987	if (rsp->orphan_nxtlist != NULL) {
1988		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
1989		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
1990		rsp->orphan_nxtlist = NULL;
1991		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
1992	}
1993}
1994
1995/*
1996 * Trace the fact that this CPU is going offline.
1997 */
1998static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1999{
2000	RCU_TRACE(unsigned long mask);
2001	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2002	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2003
2004	RCU_TRACE(mask = rdp->grpmask);
2005	trace_rcu_grace_period(rsp->name,
2006			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2007			       TPS("cpuofl"));
2008}
2009
2010/*
2011 * The CPU has been completely removed, and some other CPU is reporting
2012 * this fact from process context.  Do the remainder of the cleanup,
2013 * including orphaning the outgoing CPU's RCU callbacks, and also
2014 * adopting them.  There can only be one CPU hotplug operation at a time,
2015 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
2016 */
2017static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2018{
2019	unsigned long flags;
2020	unsigned long mask;
2021	int need_report = 0;
2022	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2023	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2024
2025	/* Adjust any no-longer-needed kthreads. */
2026	rcu_boost_kthread_setaffinity(rnp, -1);
 
 
2027
2028	/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
2029
2030	/* Exclude any attempts to start a new grace period. */
2031	mutex_lock(&rsp->onoff_mutex);
2032	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2033
2034	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2035	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2036	rcu_adopt_orphan_cbs(rsp, flags);
2037
2038	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
2039	mask = rdp->grpmask;	/* rnp->grplo is constant. */
2040	do {
2041		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
2042		smp_mb__after_unlock_lock();
2043		rnp->qsmaskinit &= ~mask;
2044		if (rnp->qsmaskinit != 0) {
2045			if (rnp != rdp->mynode)
2046				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2047			break;
 
 
 
 
 
 
 
 
 
2048		}
2049		if (rnp == rdp->mynode)
2050			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2051		else
2052			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2053		mask = rnp->grpmask;
2054		rnp = rnp->parent;
2055	} while (rnp != NULL);
2056
2057	/*
2058	 * We still hold the leaf rcu_node structure lock here, and
2059	 * irqs are still disabled.  The reason for this subterfuge is
2060	 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2061	 * held leads to deadlock.
2062	 */
2063	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2064	rnp = rdp->mynode;
2065	if (need_report & RCU_OFL_TASKS_NORM_GP)
2066		rcu_report_unblock_qs_rnp(rnp, flags);
2067	else
2068		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2069	if (need_report & RCU_OFL_TASKS_EXP_GP)
2070		rcu_report_exp_rnp(rsp, rnp, true);
2071	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2072		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2073		  cpu, rdp->qlen, rdp->nxtlist);
2074	init_callback_list(rdp);
2075	/* Disallow further callbacks on this CPU. */
2076	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2077	mutex_unlock(&rsp->onoff_mutex);
2078}
2079
2080#else /* #ifdef CONFIG_HOTPLUG_CPU */
2081
2082static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2083{
2084}
2085
2086static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2087{
2088}
2089
2090#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
2091
2092/*
2093 * Invoke any RCU callbacks that have made it to the end of their grace
2094 * period.  Thottle as specified by rdp->blimit.
2095 */
2096static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2097{
2098	unsigned long flags;
2099	struct rcu_head *next, *list, **tail;
2100	long bl, count, count_lazy;
2101	int i;
2102
2103	/* If no callbacks are ready, just return. */
2104	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2105		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2106		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
2107				    need_resched(), is_idle_task(current),
2108				    rcu_is_callbacks_kthread());
2109		return;
2110	}
2111
2112	/*
2113	 * Extract the list of ready callbacks, disabling to prevent
2114	 * races with call_rcu() from interrupt handlers.
2115	 */
2116	local_irq_save(flags);
2117	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2118	bl = rdp->blimit;
2119	trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2120	list = rdp->nxtlist;
2121	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2122	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2123	tail = rdp->nxttail[RCU_DONE_TAIL];
2124	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2125		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2126			rdp->nxttail[i] = &rdp->nxtlist;
2127	local_irq_restore(flags);
2128
2129	/* Invoke callbacks. */
2130	count = count_lazy = 0;
2131	while (list) {
2132		next = list->next;
2133		prefetch(next);
2134		debug_rcu_head_unqueue(list);
2135		if (__rcu_reclaim(rsp->name, list))
2136			count_lazy++;
2137		list = next;
2138		/* Stop only if limit reached and CPU has something to do. */
2139		if (++count >= bl &&
2140		    (need_resched() ||
2141		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2142			break;
2143	}
2144
2145	local_irq_save(flags);
2146	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2147			    is_idle_task(current),
2148			    rcu_is_callbacks_kthread());
2149
2150	/* Update count, and requeue any remaining callbacks. */
2151	if (list != NULL) {
2152		*tail = rdp->nxtlist;
2153		rdp->nxtlist = list;
2154		for (i = 0; i < RCU_NEXT_SIZE; i++)
2155			if (&rdp->nxtlist == rdp->nxttail[i])
2156				rdp->nxttail[i] = tail;
2157			else
2158				break;
2159	}
2160	smp_mb(); /* List handling before counting for rcu_barrier(). */
2161	rdp->qlen_lazy -= count_lazy;
2162	ACCESS_ONCE(rdp->qlen) -= count;
2163	rdp->n_cbs_invoked += count;
2164
2165	/* Reinstate batch limit if we have worked down the excess. */
2166	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 
2167		rdp->blimit = blimit;
2168
2169	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2170	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2171		rdp->qlen_last_fqs_check = 0;
2172		rdp->n_force_qs_snap = rsp->n_force_qs;
2173	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2174		rdp->qlen_last_fqs_check = rdp->qlen;
2175	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
 
 
 
 
 
 
 
 
 
 
2176
2177	local_irq_restore(flags);
2178
2179	/* Re-invoke RCU core processing if there are callbacks remaining. */
2180	if (cpu_has_callbacks_ready_to_invoke(rdp))
2181		invoke_rcu_core();
2182}
2183
2184/*
2185 * Check to see if this CPU is in a non-context-switch quiescent state
2186 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2187 * Also schedule RCU core processing.
2188 *
2189 * This function must be called from hardirq context.  It is normally
2190 * invoked from the scheduling-clock interrupt.  If rcu_pending returns
2191 * false, there is no point in invoking rcu_check_callbacks().
2192 */
2193void rcu_check_callbacks(int cpu, int user)
2194{
 
 
 
 
 
 
 
2195	trace_rcu_utilization(TPS("Start scheduler-tick"));
2196	increment_cpu_stall_ticks();
2197	if (user || rcu_is_cpu_rrupt_from_idle()) {
2198
2199		/*
2200		 * Get here if this CPU took its interrupt from user
2201		 * mode or from the idle loop, and if this is not a
2202		 * nested interrupt.  In this case, the CPU is in
2203		 * a quiescent state, so note it.
2204		 *
2205		 * No memory barrier is required here because both
2206		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2207		 * variables that other CPUs neither access nor modify,
2208		 * at least not while the corresponding CPU is online.
2209		 */
2210
2211		rcu_sched_qs(cpu);
2212		rcu_bh_qs(cpu);
2213
2214	} else if (!in_softirq()) {
2215
2216		/*
2217		 * Get here if this CPU did not take its interrupt from
2218		 * softirq, in other words, if it is not interrupting
2219		 * a rcu_bh read-side critical section.  This is an _bh
2220		 * critical section, so note it.
2221		 */
2222
2223		rcu_bh_qs(cpu);
2224	}
2225	rcu_preempt_check_callbacks(cpu);
2226	if (rcu_pending(cpu))
2227		invoke_rcu_core();
 
 
 
 
2228	trace_rcu_utilization(TPS("End scheduler-tick"));
2229}
2230
2231/*
2232 * Scan the leaf rcu_node structures, processing dyntick state for any that
2233 * have not yet encountered a quiescent state, using the function specified.
2234 * Also initiate boosting for any threads blocked on the root rcu_node.
2235 *
2236 * The caller must have suppressed start of new grace periods.
2237 */
2238static void force_qs_rnp(struct rcu_state *rsp,
2239			 int (*f)(struct rcu_data *rsp, bool *isidle,
2240				  unsigned long *maxj),
2241			 bool *isidle, unsigned long *maxj)
2242{
2243	unsigned long bit;
2244	int cpu;
2245	unsigned long flags;
2246	unsigned long mask;
2247	struct rcu_node *rnp;
2248
2249	rcu_for_each_leaf_node(rsp, rnp) {
2250		cond_resched();
2251		mask = 0;
2252		raw_spin_lock_irqsave(&rnp->lock, flags);
2253		smp_mb__after_unlock_lock();
2254		if (!rcu_gp_in_progress(rsp)) {
2255			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2256			return;
2257		}
2258		if (rnp->qsmask == 0) {
2259			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 
 
 
 
 
 
 
 
 
 
2260			continue;
2261		}
2262		cpu = rnp->grplo;
2263		bit = 1;
2264		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2265			if ((rnp->qsmask & bit) != 0) {
2266				if ((rnp->qsmaskinit & bit) != 0)
2267					*isidle = 0;
2268				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2269					mask |= bit;
 
2270			}
 
 
2271		}
2272		if (mask != 0) {
 
 
 
 
 
 
2273
2274			/* rcu_report_qs_rnp() releases rnp->lock. */
2275			rcu_report_qs_rnp(mask, rsp, rnp, flags);
2276			continue;
2277		}
2278		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2279	}
2280	rnp = rcu_get_root(rsp);
2281	if (rnp->qsmask == 0) {
2282		raw_spin_lock_irqsave(&rnp->lock, flags);
2283		smp_mb__after_unlock_lock();
2284		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2285	}
2286}
2287
2288/*
2289 * Force quiescent states on reluctant CPUs, and also detect which
2290 * CPUs are in dyntick-idle mode.
2291 */
2292static void force_quiescent_state(struct rcu_state *rsp)
2293{
2294	unsigned long flags;
2295	bool ret;
2296	struct rcu_node *rnp;
2297	struct rcu_node *rnp_old = NULL;
2298
 
 
2299	/* Funnel through hierarchy to reduce memory contention. */
2300	rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
2301	for (; rnp != NULL; rnp = rnp->parent) {
2302		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2303		      !raw_spin_trylock(&rnp->fqslock);
2304		if (rnp_old != NULL)
2305			raw_spin_unlock(&rnp_old->fqslock);
2306		if (ret) {
2307			ACCESS_ONCE(rsp->n_force_qs_lh)++;
2308			return;
2309		}
2310		rnp_old = rnp;
2311	}
2312	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2313
2314	/* Reached the root of the rcu_node tree, acquire lock. */
2315	raw_spin_lock_irqsave(&rnp_old->lock, flags);
2316	smp_mb__after_unlock_lock();
2317	raw_spin_unlock(&rnp_old->fqslock);
2318	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2319		ACCESS_ONCE(rsp->n_force_qs_lh)++;
2320		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2321		return;  /* Someone beat us to it. */
2322	}
2323	rsp->gp_flags |= RCU_GP_FLAG_FQS;
2324	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2325	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
 
 
 
 
 
 
 
 
 
 
2326}
2327
2328/*
2329 * This does the RCU core processing work for the specified rcu_state
2330 * and rcu_data structures.  This may be called only from the CPU to
2331 * whom the rdp belongs.
2332 */
2333static void
2334__rcu_process_callbacks(struct rcu_state *rsp)
2335{
2336	unsigned long flags;
2337	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2338
2339	WARN_ON_ONCE(rdp->beenonline == 0);
 
 
 
 
 
 
2340
2341	/* Update RCU state based on any recent quiescent states. */
2342	rcu_check_quiescent_state(rsp, rdp);
2343
2344	/* Does this CPU require a not-yet-started grace period? */
2345	local_irq_save(flags);
2346	if (cpu_needs_another_gp(rsp, rdp)) {
2347		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2348		rcu_start_gp(rsp);
2349		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2350	} else {
2351		local_irq_restore(flags);
2352	}
2353
 
 
2354	/* If there are callbacks ready, invoke them. */
2355	if (cpu_has_callbacks_ready_to_invoke(rdp))
2356		invoke_rcu_callbacks(rsp, rdp);
 
 
 
 
 
2357
2358	/* Do any needed deferred wakeups of rcuo kthreads. */
2359	do_nocb_deferred_wakeup(rdp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2360}
2361
2362/*
2363 * Do RCU core processing for the current CPU.
2364 */
2365static void rcu_process_callbacks(struct softirq_action *unused)
 
 
 
 
 
 
 
 
 
 
2366{
2367	struct rcu_state *rsp;
 
2368
2369	if (cpu_is_offline(smp_processor_id()))
2370		return;
2371	trace_rcu_utilization(TPS("Start RCU core"));
2372	for_each_rcu_flavor(rsp)
2373		__rcu_process_callbacks(rsp);
2374	trace_rcu_utilization(TPS("End RCU core"));
2375}
2376
2377/*
2378 * Schedule RCU callback invocation.  If the specified type of RCU
2379 * does not support RCU priority boosting, just do a direct call,
2380 * otherwise wake up the per-CPU kernel kthread.  Note that because we
2381 * are running on the current CPU with interrupts disabled, the
2382 * rcu_cpu_kthread_task cannot disappear out from under us.
2383 */
2384static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2385{
2386	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2387		return;
2388	if (likely(!rsp->boost)) {
2389		rcu_do_batch(rsp, rdp);
2390		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2391	}
2392	invoke_rcu_callbacks_kthread();
 
 
 
 
 
2393}
2394
2395static void invoke_rcu_core(void)
 
 
 
 
 
 
 
 
 
 
 
 
2396{
2397	if (cpu_online(smp_processor_id()))
2398		raise_softirq(RCU_SOFTIRQ);
 
 
 
 
 
 
 
2399}
2400
2401/*
2402 * Handle any core-RCU processing required by a call_rcu() invocation.
2403 */
2404static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2405			    struct rcu_head *head, unsigned long flags)
2406{
2407	/*
2408	 * If called from an extended quiescent state, invoke the RCU
2409	 * core in order to force a re-evaluation of RCU's idleness.
2410	 */
2411	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
2412		invoke_rcu_core();
2413
2414	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2415	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2416		return;
2417
2418	/*
2419	 * Force the grace period if too many callbacks or too long waiting.
2420	 * Enforce hysteresis, and don't invoke force_quiescent_state()
2421	 * if some other CPU has recently done so.  Also, don't bother
2422	 * invoking force_quiescent_state() if the newly enqueued callback
2423	 * is the only one waiting for a grace period to complete.
2424	 */
2425	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
2426
2427		/* Are we ignoring a completed grace period? */
2428		note_gp_changes(rsp, rdp);
2429
2430		/* Start a new grace period if one not already started. */
2431		if (!rcu_gp_in_progress(rsp)) {
2432			struct rcu_node *rnp_root = rcu_get_root(rsp);
2433
2434			raw_spin_lock(&rnp_root->lock);
2435			smp_mb__after_unlock_lock();
2436			rcu_start_gp(rsp);
2437			raw_spin_unlock(&rnp_root->lock);
2438		} else {
2439			/* Give the grace period a kick. */
2440			rdp->blimit = LONG_MAX;
2441			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2442			    *rdp->nxttail[RCU_DONE_TAIL] != head)
2443				force_quiescent_state(rsp);
2444			rdp->n_force_qs_snap = rsp->n_force_qs;
2445			rdp->qlen_last_fqs_check = rdp->qlen;
2446		}
2447	}
2448}
2449
2450/*
2451 * RCU callback function to leak a callback.
2452 */
2453static void rcu_leak_callback(struct rcu_head *rhp)
2454{
2455}
2456
2457/*
2458 * Helper function for call_rcu() and friends.  The cpu argument will
2459 * normally be -1, indicating "currently running CPU".  It may specify
2460 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2461 * is expected to specify a CPU.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2462 */
 
 
 
 
 
 
 
 
 
 
 
 
 
2463static void
2464__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2465	   struct rcu_state *rsp, int cpu, bool lazy)
2466{
 
2467	unsigned long flags;
 
2468	struct rcu_data *rdp;
 
 
 
 
2469
2470	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
2471	if (debug_rcu_head_queue(head)) {
2472		/* Probable double call_rcu(), so leak the callback. */
2473		ACCESS_ONCE(head->func) = rcu_leak_callback;
2474		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
 
 
 
 
 
 
 
2475		return;
2476	}
2477	head->func = func;
2478	head->next = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2479
2480	/*
2481	 * Opportunistically note grace-period endings and beginnings.
2482	 * Note that we might see a beginning right after we see an
2483	 * end, but never vice versa, since this CPU has to pass through
2484	 * a quiescent state betweentimes.
 
2485	 */
2486	local_irq_save(flags);
2487	rdp = this_cpu_ptr(rsp->rda);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2488
2489	/* Add the callback to our list. */
2490	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2491		int offline;
 
2492
2493		if (cpu != -1)
2494			rdp = per_cpu_ptr(rsp->rda, cpu);
2495		offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2496		WARN_ON_ONCE(offline);
2497		/* _call_rcu() is illegal on offline CPU; leak the callback. */
2498		local_irq_restore(flags);
2499		return;
2500	}
2501	ACCESS_ONCE(rdp->qlen)++;
2502	if (lazy)
2503		rdp->qlen_lazy++;
2504	else
2505		rcu_idle_count_callbacks_posted();
2506	smp_mb();  /* Count before adding callback for rcu_barrier(). */
2507	*rdp->nxttail[RCU_NEXT_TAIL] = head;
2508	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2509
2510	if (__is_kfree_rcu_offset((unsigned long)func))
2511		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2512					 rdp->qlen_lazy, rdp->qlen);
2513	else
2514		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2515
2516	/* Go handle any RCU core processing required. */
2517	__call_rcu_core(rsp, rdp, head, flags);
2518	local_irq_restore(flags);
2519}
2520
2521/*
2522 * Queue an RCU-sched callback for invocation after a grace period.
2523 */
2524void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2525{
2526	__call_rcu(head, func, &rcu_sched_state, -1, 0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2527}
2528EXPORT_SYMBOL_GPL(call_rcu_sched);
2529
2530/*
2531 * Queue an RCU callback for invocation after a quicker grace period.
 
 
 
 
 
 
 
 
 
2532 */
2533void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2534{
2535	__call_rcu(head, func, &rcu_bh_state, -1, 0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2536}
2537EXPORT_SYMBOL_GPL(call_rcu_bh);
2538
2539/*
2540 * Because a context switch is a grace period for RCU-sched and RCU-bh,
2541 * any blocking grace-period wait automatically implies a grace period
2542 * if there is only one CPU online at any point time during execution
2543 * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
2544 * occasionally incorrectly indicate that there are multiple CPUs online
2545 * when there was in fact only one the whole time, as this just adds
2546 * some overhead: RCU still operates correctly.
 
2547 */
2548static inline int rcu_blocking_is_gp(void)
2549{
2550	int ret;
2551
2552	might_sleep();  /* Check for RCU read-side critical section. */
2553	preempt_disable();
2554	ret = num_online_cpus() <= 1;
2555	preempt_enable();
2556	return ret;
2557}
2558
2559/**
2560 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
2561 *
2562 * Control will return to the caller some time after a full rcu-sched
2563 * grace period has elapsed, in other words after all currently executing
2564 * rcu-sched read-side critical sections have completed.   These read-side
2565 * critical sections are delimited by rcu_read_lock_sched() and
2566 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
2567 * local_irq_disable(), and so on may be used in place of
2568 * rcu_read_lock_sched().
2569 *
2570 * This means that all preempt_disable code sequences, including NMI and
2571 * non-threaded hardware-interrupt handlers, in progress on entry will
2572 * have completed before this primitive returns.  However, this does not
2573 * guarantee that softirq handlers will have completed, since in some
2574 * kernels, these handlers can run in process context, and can block.
2575 *
2576 * Note that this guarantee implies further memory-ordering guarantees.
2577 * On systems with more than one CPU, when synchronize_sched() returns,
2578 * each CPU is guaranteed to have executed a full memory barrier since the
2579 * end of its last RCU-sched read-side critical section whose beginning
2580 * preceded the call to synchronize_sched().  In addition, each CPU having
2581 * an RCU read-side critical section that extends beyond the return from
2582 * synchronize_sched() is guaranteed to have executed a full memory barrier
2583 * after the beginning of synchronize_sched() and before the beginning of
2584 * that RCU read-side critical section.  Note that these guarantees include
2585 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2586 * that are executing in the kernel.
2587 *
2588 * Furthermore, if CPU A invoked synchronize_sched(), which returned
2589 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2590 * to have executed a full memory barrier during the execution of
2591 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
2592 * again only if the system has more than one CPU).
2593 *
2594 * This primitive provides the guarantees made by the (now removed)
2595 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
2596 * guarantees that rcu_read_lock() sections will have completed.
2597 * In "classic RCU", these two guarantees happen to be one and
2598 * the same, but can differ in realtime RCU implementations.
2599 */
2600void synchronize_sched(void)
2601{
2602	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2603			   !lock_is_held(&rcu_lock_map) &&
2604			   !lock_is_held(&rcu_sched_lock_map),
2605			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
2606	if (rcu_blocking_is_gp())
 
 
 
 
2607		return;
2608	if (rcu_expedited)
2609		synchronize_sched_expedited();
2610	else
2611		wait_rcu_gp(call_rcu_sched);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2612}
2613EXPORT_SYMBOL_GPL(synchronize_sched);
2614
2615/**
2616 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 
2617 *
2618 * Control will return to the caller some time after a full rcu_bh grace
2619 * period has elapsed, in other words after all currently executing rcu_bh
2620 * read-side critical sections have completed.  RCU read-side critical
2621 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
2622 * and may be nested.
2623 *
2624 * See the description of synchronize_sched() for more detailed information
2625 * on memory ordering guarantees.
2626 */
2627void synchronize_rcu_bh(void)
2628{
2629	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2630			   !lock_is_held(&rcu_lock_map) &&
2631			   !lock_is_held(&rcu_sched_lock_map),
2632			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
2633	if (rcu_blocking_is_gp())
2634		return;
2635	if (rcu_expedited)
2636		synchronize_rcu_bh_expedited();
2637	else
2638		wait_rcu_gp(call_rcu_bh);
2639}
2640EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
2641
2642/**
2643 * get_state_synchronize_rcu - Snapshot current RCU state
2644 *
2645 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2646 * to determine whether or not a full grace period has elapsed in the
2647 * meantime.
2648 */
2649unsigned long get_state_synchronize_rcu(void)
2650{
2651	/*
2652	 * Any prior manipulation of RCU-protected data must happen
2653	 * before the load from ->gpnum.
2654	 */
2655	smp_mb();  /* ^^^ */
2656
2657	/*
2658	 * Make sure this load happens before the purportedly
2659	 * time-consuming work between get_state_synchronize_rcu()
2660	 * and cond_synchronize_rcu().
2661	 */
2662	return smp_load_acquire(&rcu_state->gpnum);
2663}
2664EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2665
2666/**
2667 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 
2668 *
2669 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 
 
 
 
 
 
 
2670 *
2671 * If a full RCU grace period has elapsed since the earlier call to
2672 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
2673 * synchronize_rcu() to wait for a full grace period.
2674 *
2675 * Yes, this function does not take counter wrap into account.  But
2676 * counter wrap is harmless.  If the counter wraps, we have waited for
2677 * more than 2 billion grace periods (and way more on a 64-bit system!),
2678 * so waiting for one additional grace period should be just fine.
2679 */
2680void cond_synchronize_rcu(unsigned long oldstate)
2681{
2682	unsigned long newstate;
2683
2684	/*
2685	 * Ensure that this load happens before any RCU-destructive
2686	 * actions the caller might carry out after we return.
2687	 */
2688	newstate = smp_load_acquire(&rcu_state->completed);
2689	if (ULONG_CMP_GE(oldstate, newstate))
2690		synchronize_rcu();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2691}
2692EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
2693
2694static int synchronize_sched_expedited_cpu_stop(void *data)
 
 
 
 
 
 
 
 
 
 
 
 
2695{
2696	/*
2697	 * There must be a full memory barrier on each affected CPU
2698	 * between the time that try_stop_cpus() is called and the
2699	 * time that it returns.
2700	 *
2701	 * In the current initial implementation of cpu_stop, the
2702	 * above condition is already met when the control reaches
2703	 * this point and the following smp_mb() is not strictly
2704	 * necessary.  Do smp_mb() anyway for documentation and
2705	 * robustness against future implementation changes.
2706	 */
2707	smp_mb(); /* See above comment block. */
2708	return 0;
2709}
 
2710
2711/**
2712 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 
2713 *
2714 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
2715 * approach to force the grace period to end quickly.  This consumes
2716 * significant time on all CPUs and is unfriendly to real-time workloads,
2717 * so is thus not recommended for any sort of common-case code.  In fact,
2718 * if you are using synchronize_sched_expedited() in a loop, please
2719 * restructure your code to batch your updates, and then use a single
2720 * synchronize_sched() instead.
2721 *
2722 * Note that it is illegal to call this function while holding any lock
2723 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
2724 * to call this function from a CPU-hotplug notifier.  Failing to observe
2725 * these restriction will result in deadlock.
2726 *
2727 * This implementation can be thought of as an application of ticket
2728 * locking to RCU, with sync_sched_expedited_started and
2729 * sync_sched_expedited_done taking on the roles of the halves
2730 * of the ticket-lock word.  Each task atomically increments
2731 * sync_sched_expedited_started upon entry, snapshotting the old value,
2732 * then attempts to stop all the CPUs.  If this succeeds, then each
2733 * CPU will have executed a context switch, resulting in an RCU-sched
2734 * grace period.  We are then done, so we use atomic_cmpxchg() to
2735 * update sync_sched_expedited_done to match our snapshot -- but
2736 * only if someone else has not already advanced past our snapshot.
2737 *
2738 * On the other hand, if try_stop_cpus() fails, we check the value
2739 * of sync_sched_expedited_done.  If it has advanced past our
2740 * initial snapshot, then someone else must have forced a grace period
2741 * some time after we took our snapshot.  In this case, our work is
2742 * done for us, and we can simply return.  Otherwise, we try again,
2743 * but keep our initial snapshot for purposes of checking for someone
2744 * doing our work for us.
2745 *
2746 * If we fail too many times in a row, we fall back to synchronize_sched().
2747 */
2748void synchronize_sched_expedited(void)
2749{
2750	long firstsnap, s, snap;
2751	int trycount = 0;
2752	struct rcu_state *rsp = &rcu_sched_state;
2753
2754	/*
2755	 * If we are in danger of counter wrap, just do synchronize_sched().
2756	 * By allowing sync_sched_expedited_started to advance no more than
2757	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
2758	 * that more than 3.5 billion CPUs would be required to force a
2759	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
2760	 * course be required on a 64-bit system.
2761	 */
2762	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
2763			 (ulong)atomic_long_read(&rsp->expedited_done) +
2764			 ULONG_MAX / 8)) {
2765		synchronize_sched();
2766		atomic_long_inc(&rsp->expedited_wrap);
2767		return;
2768	}
2769
2770	/*
2771	 * Take a ticket.  Note that atomic_inc_return() implies a
2772	 * full memory barrier.
2773	 */
2774	snap = atomic_long_inc_return(&rsp->expedited_start);
2775	firstsnap = snap;
2776	get_online_cpus();
2777	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2778
2779	/*
2780	 * Each pass through the following loop attempts to force a
2781	 * context switch on each CPU.
2782	 */
2783	while (try_stop_cpus(cpu_online_mask,
2784			     synchronize_sched_expedited_cpu_stop,
2785			     NULL) == -EAGAIN) {
2786		put_online_cpus();
2787		atomic_long_inc(&rsp->expedited_tryfail);
2788
2789		/* Check to see if someone else did our work for us. */
2790		s = atomic_long_read(&rsp->expedited_done);
2791		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2792			/* ensure test happens before caller kfree */
2793			smp_mb__before_atomic_inc(); /* ^^^ */
2794			atomic_long_inc(&rsp->expedited_workdone1);
2795			return;
2796		}
2797
2798		/* No joy, try again later.  Or just synchronize_sched(). */
2799		if (trycount++ < 10) {
2800			udelay(trycount * num_online_cpus());
2801		} else {
2802			wait_rcu_gp(call_rcu_sched);
2803			atomic_long_inc(&rsp->expedited_normal);
2804			return;
2805		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2806
2807		/* Recheck to see if someone else did our work for us. */
2808		s = atomic_long_read(&rsp->expedited_done);
2809		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2810			/* ensure test happens before caller kfree */
2811			smp_mb__before_atomic_inc(); /* ^^^ */
2812			atomic_long_inc(&rsp->expedited_workdone2);
2813			return;
2814		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2815
2816		/*
2817		 * Refetching sync_sched_expedited_started allows later
2818		 * callers to piggyback on our grace period.  We retry
2819		 * after they started, so our grace period works for them,
2820		 * and they started after our first try, so their grace
2821		 * period works for us.
2822		 */
2823		get_online_cpus();
2824		snap = atomic_long_read(&rsp->expedited_start);
2825		smp_mb(); /* ensure read is before try_stop_cpus(). */
2826	}
2827	atomic_long_inc(&rsp->expedited_stoppedcpus);
2828
2829	/*
2830	 * Everyone up to our most recent fetch is covered by our grace
2831	 * period.  Update the counter, but only if our work is still
2832	 * relevant -- which it won't be if someone who started later
2833	 * than we did already did their update.
2834	 */
2835	do {
2836		atomic_long_inc(&rsp->expedited_done_tries);
2837		s = atomic_long_read(&rsp->expedited_done);
2838		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
2839			/* ensure test happens before caller kfree */
2840			smp_mb__before_atomic_inc(); /* ^^^ */
2841			atomic_long_inc(&rsp->expedited_done_lost);
2842			break;
2843		}
2844	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
2845	atomic_long_inc(&rsp->expedited_done_exit);
2846
2847	put_online_cpus();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2848}
2849EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
2850
2851/*
2852 * Check to see if there is any immediate RCU-related work to be done
2853 * by the current CPU, for the specified type of RCU, returning 1 if so.
2854 * The checks are in order of increasing expense: checks that can be
2855 * carried out against CPU-local state are performed first.  However,
2856 * we must check for CPU stalls first, else we might not get a chance.
2857 */
2858static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
2859{
 
 
2860	struct rcu_node *rnp = rdp->mynode;
2861
2862	rdp->n_rcu_pending++;
2863
2864	/* Check for CPU stalls, if enabled. */
2865	check_cpu_stall(rsp, rdp);
 
 
 
 
2866
2867	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
2868	if (rcu_nohz_full_cpu(rsp))
2869		return 0;
2870
2871	/* Is the RCU core waiting for a quiescent state from this CPU? */
2872	if (rcu_scheduler_fully_active &&
2873	    rdp->qs_pending && !rdp->passed_quiesce) {
2874		rdp->n_rp_qs_pending++;
2875	} else if (rdp->qs_pending && rdp->passed_quiesce) {
2876		rdp->n_rp_report_qs++;
2877		return 1;
2878	}
2879
2880	/* Does this CPU have callbacks ready to invoke? */
2881	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
2882		rdp->n_rp_cb_ready++;
2883		return 1;
2884	}
2885
2886	/* Has RCU gone idle with this CPU needing another grace period? */
2887	if (cpu_needs_another_gp(rsp, rdp)) {
2888		rdp->n_rp_cpu_needs_gp++;
2889		return 1;
2890	}
2891
2892	/* Has another RCU grace period completed?  */
2893	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
2894		rdp->n_rp_gp_completed++;
2895		return 1;
2896	}
2897
2898	/* Has a new RCU grace period started? */
2899	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
2900		rdp->n_rp_gp_started++;
2901		return 1;
2902	}
2903
2904	/* Does this CPU need a deferred NOCB wakeup? */
2905	if (rcu_nocb_need_deferred_wakeup(rdp)) {
2906		rdp->n_rp_nocb_defer_wakeup++;
2907		return 1;
2908	}
2909
2910	/* nothing to do */
2911	rdp->n_rp_need_nothing++;
2912	return 0;
2913}
2914
2915/*
2916 * Check to see if there is any immediate RCU-related work to be done
2917 * by the current CPU, returning 1 if so.  This function is part of the
2918 * RCU implementation; it is -not- an exported member of the RCU API.
2919 */
2920static int rcu_pending(int cpu)
2921{
2922	struct rcu_state *rsp;
2923
2924	for_each_rcu_flavor(rsp)
2925		if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
2926			return 1;
2927	return 0;
2928}
2929
2930/*
2931 * Return true if the specified CPU has any callback.  If all_lazy is
2932 * non-NULL, store an indication of whether all callbacks are lazy.
2933 * (If there are no callbacks, all of them are deemed to be lazy.)
 
 
 
 
 
2934 */
2935static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
2936{
2937	bool al = true;
2938	bool hc = false;
2939	struct rcu_data *rdp;
2940	struct rcu_state *rsp;
2941
2942	for_each_rcu_flavor(rsp) {
2943		rdp = per_cpu_ptr(rsp->rda, cpu);
2944		if (!rdp->nxtlist)
2945			continue;
2946		hc = true;
2947		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
2948			al = false;
2949			break;
2950		}
2951	}
2952	if (all_lazy)
2953		*all_lazy = al;
2954	return hc;
2955}
2956
2957/*
2958 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
2959 * the compiler is expected to optimize this away.
2960 */
2961static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
2962			       int cpu, unsigned long done)
2963{
2964	trace_rcu_barrier(rsp->name, s, cpu,
2965			  atomic_read(&rsp->barrier_cpu_count), done);
2966}
 
2967
2968/*
2969 * RCU callback function for _rcu_barrier().  If we are last, wake
2970 * up the task executing _rcu_barrier().
2971 */
2972static void rcu_barrier_callback(struct rcu_head *rhp)
2973{
2974	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
2975	struct rcu_state *rsp = rdp->rsp;
2976
2977	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
2978		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
2979		complete(&rsp->barrier_completion);
 
 
 
 
 
2980	} else {
2981		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
 
2982	}
 
 
 
 
2983}
2984
2985/*
2986 * Called with preemption disabled, and from cross-cpu IRQ context.
2987 */
2988static void rcu_barrier_func(void *type)
2989{
2990	struct rcu_state *rsp = type;
2991	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2992
2993	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
2994	atomic_inc(&rsp->barrier_cpu_count);
2995	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 
 
 
2996}
2997
2998/*
2999 * Orchestrate the specified type of RCU barrier, waiting for all
3000 * RCU callbacks of the specified type to complete.
 
 
 
 
3001 */
3002static void _rcu_barrier(struct rcu_state *rsp)
3003{
3004	int cpu;
 
 
3005	struct rcu_data *rdp;
3006	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
3007	unsigned long snap_done;
3008
3009	_rcu_barrier_trace(rsp, "Begin", -1, snap);
3010
3011	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3012	mutex_lock(&rsp->barrier_mutex);
3013
3014	/*
3015	 * Ensure that all prior references, including to ->n_barrier_done,
3016	 * are ordered before the _rcu_barrier() machinery.
3017	 */
3018	smp_mb();  /* See above block comment. */
3019
3020	/*
3021	 * Recheck ->n_barrier_done to see if others did our work for us.
3022	 * This means checking ->n_barrier_done for an even-to-odd-to-even
3023	 * transition.  The "if" expression below therefore rounds the old
3024	 * value up to the next even number and adds two before comparing.
3025	 */
3026	snap_done = rsp->n_barrier_done;
3027	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
3028
3029	/*
3030	 * If the value in snap is odd, we needed to wait for the current
3031	 * rcu_barrier() to complete, then wait for the next one, in other
3032	 * words, we need the value of snap_done to be three larger than
3033	 * the value of snap.  On the other hand, if the value in snap is
3034	 * even, we only had to wait for the next rcu_barrier() to complete,
3035	 * in other words, we need the value of snap_done to be only two
3036	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
3037	 * this for us (thank you, Linus!).
3038	 */
3039	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3040		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3041		smp_mb(); /* caller's subsequent code after above check. */
3042		mutex_unlock(&rsp->barrier_mutex);
3043		return;
3044	}
3045
3046	/*
3047	 * Increment ->n_barrier_done to avoid duplicate work.  Use
3048	 * ACCESS_ONCE() to prevent the compiler from speculating
3049	 * the increment to precede the early-exit check.
3050	 */
3051	ACCESS_ONCE(rsp->n_barrier_done)++;
3052	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3053	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3054	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
3055
3056	/*
3057	 * Initialize the count to one rather than to zero in order to
3058	 * avoid a too-soon return to zero in case of a short grace period
3059	 * (or preemption of this task).  Exclude CPU-hotplug operations
3060	 * to ensure that no offline CPU has callbacks queued.
3061	 */
3062	init_completion(&rsp->barrier_completion);
3063	atomic_set(&rsp->barrier_cpu_count, 1);
3064	get_online_cpus();
3065
3066	/*
3067	 * Force each CPU with callbacks to register a new callback.
3068	 * When that callback is invoked, we will know that all of the
3069	 * corresponding CPU's preceding callbacks have been invoked.
3070	 */
3071	for_each_possible_cpu(cpu) {
3072		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
 
 
3073			continue;
3074		rdp = per_cpu_ptr(rsp->rda, cpu);
3075		if (rcu_is_nocb_cpu(cpu)) {
3076			_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3077					   rsp->n_barrier_done);
3078			atomic_inc(&rsp->barrier_cpu_count);
3079			__call_rcu(&rdp->barrier_head, rcu_barrier_callback,
3080				   rsp, cpu, 0);
3081		} else if (ACCESS_ONCE(rdp->qlen)) {
3082			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
3083					   rsp->n_barrier_done);
3084			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3085		} else {
3086			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3087					   rsp->n_barrier_done);
 
 
 
 
3088		}
 
 
3089	}
3090	put_online_cpus();
3091
3092	/*
3093	 * Now that we have an rcu_barrier_callback() callback on each
3094	 * CPU, and thus each counted, remove the initial count.
3095	 */
3096	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3097		complete(&rsp->barrier_completion);
3098
3099	/* Increment ->n_barrier_done to prevent duplicate work. */
3100	smp_mb(); /* Keep increment after above mechanism. */
3101	ACCESS_ONCE(rsp->n_barrier_done)++;
3102	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3103	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3104	smp_mb(); /* Keep increment before caller's subsequent code. */
 
 
 
3105
3106	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3107	wait_for_completion(&rsp->barrier_completion);
3108
3109	/* Other rcu_barrier() invocations can now safely proceed. */
3110	mutex_unlock(&rsp->barrier_mutex);
3111}
 
 
 
3112
3113/**
3114 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3115 */
3116void rcu_barrier_bh(void)
3117{
3118	_rcu_barrier(&rcu_bh_state);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3119}
3120EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3121
3122/**
3123 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3124 */
3125void rcu_barrier_sched(void)
3126{
3127	_rcu_barrier(&rcu_sched_state);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3128}
3129EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3130
3131/*
3132 * Do boot-time initialization of a CPU's per-CPU RCU data.
3133 */
3134static void __init
3135rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3136{
3137	unsigned long flags;
3138	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3139	struct rcu_node *rnp = rcu_get_root(rsp);
3140
3141	/* Set up local state, ensuring consistent view of global state. */
3142	raw_spin_lock_irqsave(&rnp->lock, flags);
3143	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3144	init_callback_list(rdp);
3145	rdp->qlen_lazy = 0;
3146	ACCESS_ONCE(rdp->qlen) = 0;
3147	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3148	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3149	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 
 
3150	rdp->cpu = cpu;
3151	rdp->rsp = rsp;
3152	rcu_boot_init_nocb_percpu_data(rdp);
3153	raw_spin_unlock_irqrestore(&rnp->lock, flags);
3154}
3155
3156/*
3157 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
3158 * offline event can be happening at a given time.  Note also that we
3159 * can accept some slop in the rsp->completed access due to the fact
3160 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 
 
 
 
3161 */
3162static void
3163rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
3164{
3165	unsigned long flags;
3166	unsigned long mask;
3167	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3168	struct rcu_node *rnp = rcu_get_root(rsp);
3169
3170	/* Exclude new grace periods. */
3171	mutex_lock(&rsp->onoff_mutex);
3172
3173	/* Set up local state, ensuring consistent view of global state. */
3174	raw_spin_lock_irqsave(&rnp->lock, flags);
3175	rdp->beenonline = 1;	 /* We have now been online. */
3176	rdp->preemptible = preemptible;
3177	rdp->qlen_last_fqs_check = 0;
3178	rdp->n_force_qs_snap = rsp->n_force_qs;
3179	rdp->blimit = blimit;
3180	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
3181	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3182	rcu_sysidle_init_percpu_data(rdp->dynticks);
3183	atomic_set(&rdp->dynticks->dynticks,
3184		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
3185	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3186
3187	/* Add CPU to rcu_node bitmasks. */
3188	rnp = rdp->mynode;
3189	mask = rdp->grpmask;
3190	do {
3191		/* Exclude any attempts to start a new GP on small systems. */
3192		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
3193		rnp->qsmaskinit |= mask;
3194		mask = rnp->grpmask;
3195		if (rnp == rdp->mynode) {
3196			/*
3197			 * If there is a grace period in progress, we will
3198			 * set up to wait for it next time we run the
3199			 * RCU core code.
3200			 */
3201			rdp->gpnum = rnp->completed;
3202			rdp->completed = rnp->completed;
3203			rdp->passed_quiesce = 0;
3204			rdp->qs_pending = 0;
3205			trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3206		}
3207		raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
3208		rnp = rnp->parent;
3209	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
3210	local_irq_restore(flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3211
3212	mutex_unlock(&rsp->onoff_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3213}
3214
3215static void rcu_prepare_cpu(int cpu)
 
 
 
 
 
 
3216{
3217	struct rcu_state *rsp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3218
3219	for_each_rcu_flavor(rsp)
3220		rcu_init_percpu_data(cpu, rsp,
3221				     strcmp(rsp->name, "rcu_preempt") == 0);
 
 
 
 
 
 
 
 
 
3222}
3223
3224/*
3225 * Handle CPU online/offline notification events.
 
3226 */
3227static int rcu_cpu_notify(struct notifier_block *self,
3228				    unsigned long action, void *hcpu)
3229{
3230	long cpu = (long)hcpu;
3231	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
3232	struct rcu_node *rnp = rdp->mynode;
3233	struct rcu_state *rsp;
3234
3235	trace_rcu_utilization(TPS("Start CPU hotplug"));
3236	switch (action) {
3237	case CPU_UP_PREPARE:
3238	case CPU_UP_PREPARE_FROZEN:
3239		rcu_prepare_cpu(cpu);
3240		rcu_prepare_kthreads(cpu);
3241		break;
3242	case CPU_ONLINE:
3243	case CPU_DOWN_FAILED:
3244		rcu_boost_kthread_setaffinity(rnp, -1);
3245		break;
3246	case CPU_DOWN_PREPARE:
3247		rcu_boost_kthread_setaffinity(rnp, cpu);
3248		break;
3249	case CPU_DYING:
3250	case CPU_DYING_FROZEN:
3251		for_each_rcu_flavor(rsp)
3252			rcu_cleanup_dying_cpu(rsp);
3253		break;
3254	case CPU_DEAD:
3255	case CPU_DEAD_FROZEN:
3256	case CPU_UP_CANCELED:
3257	case CPU_UP_CANCELED_FROZEN:
3258		for_each_rcu_flavor(rsp)
3259			rcu_cleanup_dead_cpu(cpu, rsp);
3260		break;
3261	default:
3262		break;
3263	}
3264	trace_rcu_utilization(TPS("End CPU hotplug"));
3265	return NOTIFY_OK;
3266}
 
3267
 
 
 
 
3268static int rcu_pm_notify(struct notifier_block *self,
3269			 unsigned long action, void *hcpu)
3270{
3271	switch (action) {
3272	case PM_HIBERNATION_PREPARE:
3273	case PM_SUSPEND_PREPARE:
3274		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3275			rcu_expedited = 1;
3276		break;
3277	case PM_POST_HIBERNATION:
3278	case PM_POST_SUSPEND:
3279		rcu_expedited = 0;
 
3280		break;
3281	default:
3282		break;
3283	}
3284	return NOTIFY_OK;
3285}
3286
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3287/*
3288 * Spawn the kthread that handles this RCU flavor's grace periods.
3289 */
3290static int __init rcu_spawn_gp_kthread(void)
3291{
3292	unsigned long flags;
3293	struct rcu_node *rnp;
3294	struct rcu_state *rsp;
3295	struct task_struct *t;
 
3296
3297	for_each_rcu_flavor(rsp) {
3298		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
3299		BUG_ON(IS_ERR(t));
3300		rnp = rcu_get_root(rsp);
3301		raw_spin_lock_irqsave(&rnp->lock, flags);
3302		rsp->gp_kthread = t;
3303		raw_spin_unlock_irqrestore(&rnp->lock, flags);
3304		rcu_spawn_nocb_kthreads(rsp);
3305	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3306	return 0;
3307}
3308early_initcall(rcu_spawn_gp_kthread);
3309
3310/*
3311 * This function is invoked towards the end of the scheduler's initialization
3312 * process.  Before this is called, the idle task might contain
3313 * RCU read-side critical sections (during which time, this idle
3314 * task is booting the system).  After this function is called, the
3315 * idle tasks are prohibited from containing RCU read-side critical
3316 * sections.  This function also enables RCU lockdep checking.
 
 
3317 */
3318void rcu_scheduler_starting(void)
3319{
 
 
 
3320	WARN_ON(num_online_cpus() != 1);
3321	WARN_ON(nr_context_switches() > 0);
3322	rcu_scheduler_active = 1;
3323}
3324
3325/*
3326 * Compute the per-level fanout, either using the exact fanout specified
3327 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3328 */
3329#ifdef CONFIG_RCU_FANOUT_EXACT
3330static void __init rcu_init_levelspread(struct rcu_state *rsp)
3331{
3332	int i;
3333
3334	rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3335	for (i = rcu_num_lvls - 2; i >= 0; i--)
3336		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3337}
3338#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
3339static void __init rcu_init_levelspread(struct rcu_state *rsp)
3340{
3341	int ccur;
3342	int cprv;
3343	int i;
3344
3345	cprv = nr_cpu_ids;
3346	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3347		ccur = rsp->levelcnt[i];
3348		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3349		cprv = ccur;
3350	}
3351}
3352#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
3353
3354/*
3355 * Helper function for rcu_init() that initializes one rcu_state structure.
3356 */
3357static void __init rcu_init_one(struct rcu_state *rsp,
3358		struct rcu_data __percpu *rda)
3359{
3360	static char *buf[] = { "rcu_node_0",
3361			       "rcu_node_1",
3362			       "rcu_node_2",
3363			       "rcu_node_3" };  /* Match MAX_RCU_LVLS */
3364	static char *fqs[] = { "rcu_node_fqs_0",
3365			       "rcu_node_fqs_1",
3366			       "rcu_node_fqs_2",
3367			       "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
3368	int cpustride = 1;
3369	int i;
3370	int j;
3371	struct rcu_node *rnp;
3372
3373	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3374
3375	/* Silence gcc 4.8 warning about array index out of range. */
3376	if (rcu_num_lvls > RCU_NUM_LVLS)
3377		panic("rcu_init_one: rcu_num_lvls overflow");
3378
3379	/* Initialize the level-tracking arrays. */
3380
3381	for (i = 0; i < rcu_num_lvls; i++)
3382		rsp->levelcnt[i] = num_rcu_lvl[i];
3383	for (i = 1; i < rcu_num_lvls; i++)
3384		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3385	rcu_init_levelspread(rsp);
 
3386
3387	/* Initialize the elements themselves, starting from the leaves. */
3388
3389	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3390		cpustride *= rsp->levelspread[i];
3391		rnp = rsp->level[i];
3392		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3393			raw_spin_lock_init(&rnp->lock);
3394			lockdep_set_class_and_name(&rnp->lock,
3395						   &rcu_node_class[i], buf[i]);
3396			raw_spin_lock_init(&rnp->fqslock);
3397			lockdep_set_class_and_name(&rnp->fqslock,
3398						   &rcu_fqs_class[i], fqs[i]);
3399			rnp->gpnum = rsp->gpnum;
3400			rnp->completed = rsp->completed;
 
3401			rnp->qsmask = 0;
3402			rnp->qsmaskinit = 0;
3403			rnp->grplo = j * cpustride;
3404			rnp->grphi = (j + 1) * cpustride - 1;
3405			if (rnp->grphi >= NR_CPUS)
3406				rnp->grphi = NR_CPUS - 1;
3407			if (i == 0) {
3408				rnp->grpnum = 0;
3409				rnp->grpmask = 0;
3410				rnp->parent = NULL;
3411			} else {
3412				rnp->grpnum = j % rsp->levelspread[i - 1];
3413				rnp->grpmask = 1UL << rnp->grpnum;
3414				rnp->parent = rsp->level[i - 1] +
3415					      j / rsp->levelspread[i - 1];
3416			}
3417			rnp->level = i;
3418			INIT_LIST_HEAD(&rnp->blkd_tasks);
3419			rcu_init_one_nocb(rnp);
 
 
 
 
 
 
 
 
 
3420		}
3421	}
3422
3423	rsp->rda = rda;
3424	init_waitqueue_head(&rsp->gp_wq);
3425	init_irq_work(&rsp->wakeup_work, rsp_wakeup);
3426	rnp = rsp->level[rcu_num_lvls - 1];
3427	for_each_possible_cpu(i) {
3428		while (i > rnp->grphi)
3429			rnp++;
3430		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3431		rcu_boot_init_percpu_data(i, rsp);
3432	}
3433	list_add(&rsp->flavors, &rcu_struct_flavors);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3434}
3435
3436/*
3437 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3438 * replace the definitions in tree.h because those are needed to size
3439 * the ->node array in the rcu_state structure.
3440 */
3441static void __init rcu_init_geometry(void)
3442{
3443	ulong d;
3444	int i;
3445	int j;
3446	int n = nr_cpu_ids;
3447	int rcu_capacity[MAX_RCU_LVLS + 1];
 
 
 
 
 
 
 
 
 
 
 
 
3448
3449	/*
3450	 * Initialize any unspecified boot parameters.
3451	 * The default values of jiffies_till_first_fqs and
3452	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3453	 * value, which is a function of HZ, then adding one for each
3454	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3455	 */
3456	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3457	if (jiffies_till_first_fqs == ULONG_MAX)
3458		jiffies_till_first_fqs = d;
3459	if (jiffies_till_next_fqs == ULONG_MAX)
3460		jiffies_till_next_fqs = d;
 
3461
3462	/* If the compile-time values are accurate, just leave. */
3463	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
3464	    nr_cpu_ids == NR_CPUS)
3465		return;
3466	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
3467		rcu_fanout_leaf, nr_cpu_ids);
3468
3469	/*
 
 
 
 
 
 
 
 
 
 
 
 
 
3470	 * Compute number of nodes that can be handled an rcu_node tree
3471	 * with the given number of levels.  Setting rcu_capacity[0] makes
3472	 * some of the arithmetic easier.
3473	 */
3474	rcu_capacity[0] = 1;
3475	rcu_capacity[1] = rcu_fanout_leaf;
3476	for (i = 2; i <= MAX_RCU_LVLS; i++)
3477		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
3478
3479	/*
3480	 * The boot-time rcu_fanout_leaf parameter is only permitted
3481	 * to increase the leaf-level fanout, not decrease it.  Of course,
3482	 * the leaf-level fanout cannot exceed the number of bits in
3483	 * the rcu_node masks.  Finally, the tree must be able to accommodate
3484	 * the configured number of CPUs.  Complain and fall back to the
3485	 * compile-time values if these limits are exceeded.
3486	 */
3487	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
3488	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
3489	    n > rcu_capacity[MAX_RCU_LVLS]) {
3490		WARN_ON(1);
3491		return;
3492	}
3493
 
 
 
 
 
3494	/* Calculate the number of rcu_nodes at each level of the tree. */
3495	for (i = 1; i <= MAX_RCU_LVLS; i++)
3496		if (n <= rcu_capacity[i]) {
3497			for (j = 0; j <= i; j++)
3498				num_rcu_lvl[j] =
3499					DIV_ROUND_UP(n, rcu_capacity[i - j]);
3500			rcu_num_lvls = i;
3501			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
3502				num_rcu_lvl[j] = 0;
3503			break;
3504		}
3505
3506	/* Calculate the total number of rcu_node structures. */
3507	rcu_num_nodes = 0;
3508	for (i = 0; i <= MAX_RCU_LVLS; i++)
3509		rcu_num_nodes += num_rcu_lvl[i];
3510	rcu_num_nodes -= n;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3511}
3512
3513void __init rcu_init(void)
3514{
3515	int cpu;
 
 
3516
 
3517	rcu_bootup_announce();
 
3518	rcu_init_geometry();
3519	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
3520	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
3521	__rcu_init_preempt();
3522	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
3523
3524	/*
3525	 * We don't need protection against CPU-hotplug here because
3526	 * this is called early in boot, before either interrupts
3527	 * or the scheduler are operational.
3528	 */
3529	cpu_notifier(rcu_cpu_notify, 0);
3530	pm_notifier(rcu_pm_notify, 0);
3531	for_each_online_cpu(cpu)
3532		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3533}
3534
 
 
 
3535#include "tree_plugin.h"