v3.15
   1/*
   2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
   3 * Internal non-public definitions that provide either classic
   4 * or preemptible semantics.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, you can access it online at
  18 * http://www.gnu.org/licenses/gpl-2.0.html.
  19 *
  20 * Copyright Red Hat, 2009
  21 * Copyright IBM Corporation, 2009
  22 *
  23 * Author: Ingo Molnar <mingo@elte.hu>
  24 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  25 */
  26
  27#include <linux/delay.h>
  28#include <linux/gfp.h>
  29#include <linux/oom.h>
  30#include <linux/smpboot.h>
  31#include "../time/tick-internal.h"
  32
  33#define RCU_KTHREAD_PRIO 1
  34
  35#ifdef CONFIG_RCU_BOOST
  36#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
  37#else
  38#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
  39#endif
  40
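/*
 * Illustrative note (not from this file): with CONFIG_RCU_BOOST=y and
 * the Kconfig default CONFIG_RCU_BOOST_PRIO=1, the boost kthreads
 * created later in this file run at SCHED_FIFO priority 1, the same
 * priority as RCU_KTHREAD_PRIO above.
 */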
  41#ifdef CONFIG_RCU_NOCB_CPU
  42static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
  43static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
  44static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
  45static char __initdata nocb_buf[NR_CPUS * 5];
  46#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
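/*
 * Illustrative usage (not part of this file): callback offloading is
 * requested at boot via the rcu_nocbs= kernel parameter, optionally
 * combined with rcu_nocb_poll.  For example, booting with:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * offloads callbacks from CPUs 1-7 and makes the offload ("rcuo")
 * kthreads poll for new callbacks instead of waiting to be awakened.
 */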
  47
  48/*
  49 * Check the RCU kernel configuration parameters and print informative
  50 * messages about anything out of the ordinary.  If you like #ifdef, you
  51 * will love this function.
  52 */
  53static void __init rcu_bootup_announce_oddness(void)
  54{
  55#ifdef CONFIG_RCU_TRACE
  56	pr_info("\tRCU debugfs-based tracing is enabled.\n");
  57#endif
  58#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
  59	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
  60	       CONFIG_RCU_FANOUT);
  61#endif
  62#ifdef CONFIG_RCU_FANOUT_EXACT
  63	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
  64#endif
  65#ifdef CONFIG_RCU_FAST_NO_HZ
  66	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
  67#endif
  68#ifdef CONFIG_PROVE_RCU
  69	pr_info("\tRCU lockdep checking is enabled.\n");
  70#endif
  71#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
  72	pr_info("\tRCU torture testing starts during boot.\n");
  73#endif
  74#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
  75	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
  76#endif
  77#if defined(CONFIG_RCU_CPU_STALL_INFO)
  78	pr_info("\tAdditional per-CPU info printed with stalls.\n");
  79#endif
  80#if NUM_RCU_LVL_4 != 0
  81	pr_info("\tFour-level hierarchy is enabled.\n");
  82#endif
  83	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
  84		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
  85	if (nr_cpu_ids != NR_CPUS)
  86		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
  87#ifdef CONFIG_RCU_NOCB_CPU
  88#ifndef CONFIG_RCU_NOCB_CPU_NONE
  89	if (!have_rcu_nocb_mask) {
  90		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
  91		have_rcu_nocb_mask = true;
  92	}
  93#ifdef CONFIG_RCU_NOCB_CPU_ZERO
  94	pr_info("\tOffload RCU callbacks from CPU 0\n");
  95	cpumask_set_cpu(0, rcu_nocb_mask);
  96#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
  97#ifdef CONFIG_RCU_NOCB_CPU_ALL
  98	pr_info("\tOffload RCU callbacks from all CPUs\n");
  99	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
 100#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
 101#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
 102	if (have_rcu_nocb_mask) {
 103		if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
 104			pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
 105			cpumask_and(rcu_nocb_mask, cpu_possible_mask,
 106				    rcu_nocb_mask);
 107		}
 108		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
 109		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
 110		if (rcu_nocb_poll)
 111			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 112	}
 113#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 114}
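/*
 * Hypothetical console output from the two announcement functions,
 * assuming CONFIG_TREE_PREEMPT_RCU=y, CONFIG_RCU_FANOUT=16 on a 64-bit
 * build, and rcu_nocbs=0 on the command line (assembled from the
 * pr_info() format strings above):
 *
 *	Preemptible hierarchical RCU implementation.
 *		CONFIG_RCU_FANOUT set to non-default value of 16
 *		Offload RCU callbacks from CPUs: 0.
 */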
 115
 116#ifdef CONFIG_TREE_PREEMPT_RCU
 117
 118RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 119static struct rcu_state *rcu_state = &rcu_preempt_state;
 120
 121static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 122
 123/*
 124 * Tell them what RCU they are running.
 125 */
 126static void __init rcu_bootup_announce(void)
 127{
 128	pr_info("Preemptible hierarchical RCU implementation.\n");
 129	rcu_bootup_announce_oddness();
 130}
 131
 132/*
 133 * Return the number of RCU-preempt batches processed thus far
 134 * for debug and statistics.
 135 */
 136long rcu_batches_completed_preempt(void)
 137{
 138	return rcu_preempt_state.completed;
 139}
 140EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
 141
 142/*
 143 * Return the number of RCU batches processed thus far for debug & stats.
 144 */
 145long rcu_batches_completed(void)
 146{
 147	return rcu_batches_completed_preempt();
 148}
 149EXPORT_SYMBOL_GPL(rcu_batches_completed);
 150
 151/*
 152 * Force a quiescent state for preemptible RCU.
 153 */
 154void rcu_force_quiescent_state(void)
 155{
 156	force_quiescent_state(&rcu_preempt_state);
 157}
 158EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 159
 160/*
 161 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 162 * that this just means that the task currently running on the CPU is
 163 * not in an RCU read-side critical section.  There might be any number
 164 * of tasks blocked while in such critical sections.
 165 *
 166 * Unlike the other rcu_*_qs() functions, callers to this function
 167 * must disable irqs in order to protect the assignment to
 168 * ->rcu_read_unlock_special.
 169 */
 170static void rcu_preempt_qs(int cpu)
 171{
 172	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 173
 174	if (rdp->passed_quiesce == 0)
 175		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
 176	rdp->passed_quiesce = 1;
 177	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 178}
 179
 180/*
 181 * We have entered the scheduler, and the current task might soon be
 182 * context-switched away from.  If this task is in an RCU read-side
 183 * critical section, we will no longer be able to rely on the CPU to
 184 * record that fact, so we enqueue the task on the blkd_tasks list.
 185 * The task will dequeue itself when it exits the outermost enclosing
 186 * RCU read-side critical section.  Therefore, the current grace period
 187 * cannot be permitted to complete until the blkd_tasks list entries
 188 * predating the current grace period drain, in other words, until
 189 * rnp->gp_tasks becomes NULL.
 190 *
 191 * Caller must disable preemption.
 192 */
 193static void rcu_preempt_note_context_switch(int cpu)
 194{
 195	struct task_struct *t = current;
 196	unsigned long flags;
 197	struct rcu_data *rdp;
 198	struct rcu_node *rnp;
 199
 200	if (t->rcu_read_lock_nesting > 0 &&
 201	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 202
 203		/* Possibly blocking in an RCU read-side critical section. */
 204		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 205		rnp = rdp->mynode;
 206		raw_spin_lock_irqsave(&rnp->lock, flags);
 207		smp_mb__after_unlock_lock();
 208		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
 209		t->rcu_blocked_node = rnp;
 210
 211		/*
 212		 * If this CPU has already checked in, then this task
 213		 * will hold up the next grace period rather than the
 214		 * current grace period.  Queue the task accordingly.
 215		 * If the task is queued for the current grace period
 216		 * (i.e., this CPU has not yet passed through a quiescent
 217		 * state for the current grace period), then as long
 218		 * as that task remains queued, the current grace period
 219		 * cannot end.  Note that there is some uncertainty as
 220		 * to exactly when the current grace period started.
 221		 * We take a conservative approach, which can result
 222		 * in unnecessarily waiting on tasks that started very
 223		 * slightly after the current grace period began.  C'est
 224		 * la vie!!!
 225		 *
 226		 * But first, note that the current CPU must still be
 227		 * on line!
 228		 */
 229		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
 230		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
 231		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
 232			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
 233			rnp->gp_tasks = &t->rcu_node_entry;
 234#ifdef CONFIG_RCU_BOOST
 235			if (rnp->boost_tasks != NULL)
 236				rnp->boost_tasks = rnp->gp_tasks;
 237#endif /* #ifdef CONFIG_RCU_BOOST */
 238		} else {
 239			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
 240			if (rnp->qsmask & rdp->grpmask)
 241				rnp->gp_tasks = &t->rcu_node_entry;
 242		}
 243		trace_rcu_preempt_task(rdp->rsp->name,
 244				       t->pid,
 245				       (rnp->qsmask & rdp->grpmask)
 246				       ? rnp->gpnum
 247				       : rnp->gpnum + 1);
 248		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 249	} else if (t->rcu_read_lock_nesting < 0 &&
 250		   t->rcu_read_unlock_special) {
 251
 252		/*
 253		 * Complete exit from RCU read-side critical section on
 254		 * behalf of preempted instance of __rcu_read_unlock().
 255		 */
 256		rcu_read_unlock_special(t);
 257	}
 258
 259	/*
 260	 * Either we were not in an RCU read-side critical section to
 261	 * begin with, or we have now recorded that critical section
 262	 * globally.  Either way, we can now note a quiescent state
 263	 * for this CPU.  Again, if we were in an RCU read-side critical
 264	 * section, and if that critical section was blocking the current
 265	 * grace period, then the fact that the task has been enqueued
 266	 * means that we continue to block the current grace period.
 267	 */
 268	local_irq_save(flags);
 269	rcu_preempt_qs(cpu);
 270	local_irq_restore(flags);
 271}
 272
 273/*
 274 * Check for preempted RCU readers blocking the current grace period
 275 * for the specified rcu_node structure.  If the caller needs a reliable
 276 * answer, it must hold the rcu_node's ->lock.
 277 */
 278static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 279{
 280	return rnp->gp_tasks != NULL;
 281}
 282
 283/*
 284 * Record a quiescent state for all tasks that were previously queued
 285 * on the specified rcu_node structure and that were blocking the current
 286 * RCU grace period.  The caller must hold the specified rnp->lock with
 287 * irqs disabled, and this lock is released upon return, but irqs remain
 288 * disabled.
 289 */
 290static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 291	__releases(rnp->lock)
 292{
 293	unsigned long mask;
 294	struct rcu_node *rnp_p;
 295
 296	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 297		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 298		return;  /* Still need more quiescent states! */
 299	}
 300
 301	rnp_p = rnp->parent;
 302	if (rnp_p == NULL) {
 303		/*
 304		 * Either there is only one rcu_node in the tree,
 305		 * or tasks were kicked up to root rcu_node due to
 306		 * CPUs going offline.
 307		 */
 308		rcu_report_qs_rsp(&rcu_preempt_state, flags);
 309		return;
 310	}
 311
 312	/* Report up the rest of the hierarchy. */
 313	mask = rnp->grpmask;
 314	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 315	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
 316	smp_mb__after_unlock_lock();
 317	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
 318}
 319
 320/*
 321 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 322 * NULL instead if at the end of the list.
 323 */
 324static struct list_head *rcu_next_node_entry(struct task_struct *t,
 325					     struct rcu_node *rnp)
 326{
 327	struct list_head *np;
 328
 329	np = t->rcu_node_entry.next;
 330	if (np == &rnp->blkd_tasks)
 331		np = NULL;
 332	return np;
 333}
 334
 335/*
 336 * Handle special cases during rcu_read_unlock(), such as needing to
 337 * notify RCU core processing or the task having blocked during the
 338 * RCU read-side critical section.
 339 */
 340void rcu_read_unlock_special(struct task_struct *t)
 341{
 342	int empty;
 343	int empty_exp;
 344	int empty_exp_now;
 345	unsigned long flags;
 346	struct list_head *np;
 347#ifdef CONFIG_RCU_BOOST
 348	struct rt_mutex *rbmp = NULL;
 349#endif /* #ifdef CONFIG_RCU_BOOST */
 350	struct rcu_node *rnp;
 351	int special;
 352
 353	/* NMI handlers cannot block and cannot safely manipulate state. */
 354	if (in_nmi())
 355		return;
 356
 357	local_irq_save(flags);
 358
 359	/*
 360	 * If RCU core is waiting for this CPU to exit its critical
 361	 * section, let it know that we have done so.
 362	 */
 363	special = t->rcu_read_unlock_special;
 364	if (special & RCU_READ_UNLOCK_NEED_QS) {
 365		rcu_preempt_qs(smp_processor_id());
 366		if (!t->rcu_read_unlock_special) {
 367			local_irq_restore(flags);
 368			return;
 369		}
 370	}
 371
 372	/* Hardware IRQ handlers cannot block, complain if they get here. */
 373	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
 374		local_irq_restore(flags);
 375		return;
 376	}
 377
 378	/* Clean up if blocked during RCU read-side critical section. */
 379	if (special & RCU_READ_UNLOCK_BLOCKED) {
 380		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
 381
 382		/*
 383		 * Remove this task from the list it blocked on.  The
 384		 * task can migrate while we acquire the lock, but at
 385		 * most one time.  So at most two passes through the loop.
 386		 */
 387		for (;;) {
 388			rnp = t->rcu_blocked_node;
 389			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
 390			smp_mb__after_unlock_lock();
 391			if (rnp == t->rcu_blocked_node)
 392				break;
 393			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 394		}
 395		empty = !rcu_preempt_blocked_readers_cgp(rnp);
 396		empty_exp = !rcu_preempted_readers_exp(rnp);
 397		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 398		np = rcu_next_node_entry(t, rnp);
 399		list_del_init(&t->rcu_node_entry);
 400		t->rcu_blocked_node = NULL;
 401		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
 402						rnp->gpnum, t->pid);
 403		if (&t->rcu_node_entry == rnp->gp_tasks)
 404			rnp->gp_tasks = np;
 405		if (&t->rcu_node_entry == rnp->exp_tasks)
 406			rnp->exp_tasks = np;
 407#ifdef CONFIG_RCU_BOOST
 408		if (&t->rcu_node_entry == rnp->boost_tasks)
 409			rnp->boost_tasks = np;
 410		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
 411		if (t->rcu_boost_mutex) {
 412			rbmp = t->rcu_boost_mutex;
 413			t->rcu_boost_mutex = NULL;
 414		}
 415#endif /* #ifdef CONFIG_RCU_BOOST */
 416
 417		/*
 418		 * If this was the last task on the current list, and if
 419		 * we aren't waiting on any CPUs, report the quiescent state.
 420		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
 421		 * so we must take a snapshot of the expedited state.
 422		 */
 423		empty_exp_now = !rcu_preempted_readers_exp(rnp);
 424		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
 425			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 426							 rnp->gpnum,
 427							 0, rnp->qsmask,
 428							 rnp->level,
 429							 rnp->grplo,
 430							 rnp->grphi,
 431							 !!rnp->gp_tasks);
 432			rcu_report_unblock_qs_rnp(rnp, flags);
 433		} else {
 434			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 435		}
 436
 437#ifdef CONFIG_RCU_BOOST
 438		/* Unboost if we were boosted. */
 439		if (rbmp)
 440			rt_mutex_unlock(rbmp);
 441#endif /* #ifdef CONFIG_RCU_BOOST */
 442
 443		/*
 444		 * If this was the last task on the expedited lists,
 445		 * then we need to report up the rcu_node hierarchy.
 446		 */
 447		if (!empty_exp && empty_exp_now)
 448			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
 449	} else {
 450		local_irq_restore(flags);
 451	}
 452}
 453
 454#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 455
 456/*
 457 * Dump detailed information for all tasks blocking the current RCU
 458 * grace period on the specified rcu_node structure.
 459 */
 460static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 461{
 462	unsigned long flags;
 463	struct task_struct *t;
 464
 465	raw_spin_lock_irqsave(&rnp->lock, flags);
 466	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 467		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 468		return;
 469	}
 470	t = list_entry(rnp->gp_tasks,
 471		       struct task_struct, rcu_node_entry);
 472	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 473		sched_show_task(t);
 474	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 475}
 476
 477/*
 478 * Dump detailed information for all tasks blocking the current RCU
 479 * grace period.
 480 */
 481static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 482{
 483	struct rcu_node *rnp = rcu_get_root(rsp);
 484
 485	rcu_print_detail_task_stall_rnp(rnp);
 486	rcu_for_each_leaf_node(rsp, rnp)
 487		rcu_print_detail_task_stall_rnp(rnp);
 488}
 489
 490#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
 491
 492static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 493{
 494}
 495
 496#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
 497
 498#ifdef CONFIG_RCU_CPU_STALL_INFO
 499
 500static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 501{
 502	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
 503	       rnp->level, rnp->grplo, rnp->grphi);
 504}
 505
 506static void rcu_print_task_stall_end(void)
 507{
 508	pr_cont("\n");
 509}
 510
 511#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
 512
 513static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 514{
 515}
 516
 517static void rcu_print_task_stall_end(void)
 518{
 519}
 520
 521#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
 522
 523/*
 524 * Scan the current list of tasks blocked within RCU read-side critical
 525 * sections, printing out the tid of each.
 526 */
 527static int rcu_print_task_stall(struct rcu_node *rnp)
 528{
 529	struct task_struct *t;
 530	int ndetected = 0;
 531
 532	if (!rcu_preempt_blocked_readers_cgp(rnp))
 533		return 0;
 534	rcu_print_task_stall_begin(rnp);
 535	t = list_entry(rnp->gp_tasks,
 536		       struct task_struct, rcu_node_entry);
 537	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 538		pr_cont(" P%d", t->pid);
 539		ndetected++;
 540	}
 541	rcu_print_task_stall_end();
 542	return ndetected;
 543}
 544
 545/*
 546 * Check that the list of blocked tasks for the newly completed grace
 547 * period is in fact empty.  It is a serious bug to complete a grace
 548 * period that still has RCU readers blocked!  This function must be
 549 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 550 * must be held by the caller.
 551 *
 552 * Also, if there are blocked tasks on the list, they automatically
 553 * block the newly created grace period, so set up ->gp_tasks accordingly.
 554 */
 555static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 556{
 557	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 558	if (!list_empty(&rnp->blkd_tasks))
 559		rnp->gp_tasks = rnp->blkd_tasks.next;
 560	WARN_ON_ONCE(rnp->qsmask);
 561}
 562
 563#ifdef CONFIG_HOTPLUG_CPU
 564
 565/*
 566 * Handle tasklist migration for case in which all CPUs covered by the
 567 * specified rcu_node have gone offline.  Move them up to the root
 568 * rcu_node.  The reason for not just moving them to the immediate
 569 * parent is to remove the need for rcu_read_unlock_special() to
 570 * make more than two attempts to acquire the target rcu_node's lock.
 571 * Returns 1 if there was previously a task blocking the current grace
 572 * period on the specified rcu_node structure.
 576 *
 577 * The caller must hold rnp->lock with irqs disabled.
 578 */
 579static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 580				     struct rcu_node *rnp,
 581				     struct rcu_data *rdp)
 582{
 583	struct list_head *lp;
 584	struct list_head *lp_root;
 585	int retval = 0;
 586	struct rcu_node *rnp_root = rcu_get_root(rsp);
 587	struct task_struct *t;
 588
 589	if (rnp == rnp_root) {
 590		WARN_ONCE(1, "Last CPU thought to be offlined?");
 591		return 0;  /* Shouldn't happen: at least one CPU online. */
 592	}
 593
 594	/* If we are on an internal node, complain bitterly. */
 595	WARN_ON_ONCE(rnp != rdp->mynode);
 596
 597	/*
 598	 * Move tasks up to root rcu_node.  Don't try to get fancy for
 599	 * this corner-case operation -- just put this node's tasks
 600	 * at the head of the root node's list, and update the root node's
 601	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
 602	 * if non-NULL.  This might result in waiting for more tasks than
 603	 * absolutely necessary, but this is a good performance/complexity
 604	 * tradeoff.
 605	 */
 606	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
 607		retval |= RCU_OFL_TASKS_NORM_GP;
 608	if (rcu_preempted_readers_exp(rnp))
 609		retval |= RCU_OFL_TASKS_EXP_GP;
 610	lp = &rnp->blkd_tasks;
 611	lp_root = &rnp_root->blkd_tasks;
 612	while (!list_empty(lp)) {
 613		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
 614		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
 615		smp_mb__after_unlock_lock();
 616		list_del(&t->rcu_node_entry);
 617		t->rcu_blocked_node = rnp_root;
 618		list_add(&t->rcu_node_entry, lp_root);
 619		if (&t->rcu_node_entry == rnp->gp_tasks)
 620			rnp_root->gp_tasks = rnp->gp_tasks;
 621		if (&t->rcu_node_entry == rnp->exp_tasks)
 622			rnp_root->exp_tasks = rnp->exp_tasks;
 623#ifdef CONFIG_RCU_BOOST
 624		if (&t->rcu_node_entry == rnp->boost_tasks)
 625			rnp_root->boost_tasks = rnp->boost_tasks;
 626#endif /* #ifdef CONFIG_RCU_BOOST */
 627		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
 628	}
 629
 630	rnp->gp_tasks = NULL;
 631	rnp->exp_tasks = NULL;
 632#ifdef CONFIG_RCU_BOOST
 633	rnp->boost_tasks = NULL;
 634	/*
 635	 * In case root is being boosted and leaf was not.  Make sure
 636	 * that we boost the tasks blocking the current grace period
 637	 * in this case.
 638	 */
 639	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
 640	smp_mb__after_unlock_lock();
 641	if (rnp_root->boost_tasks != NULL &&
 642	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
 643	    rnp_root->boost_tasks != rnp_root->exp_tasks)
 644		rnp_root->boost_tasks = rnp_root->gp_tasks;
 645	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
 646#endif /* #ifdef CONFIG_RCU_BOOST */
 647
 648	return retval;
 649}
 650
 651#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 652
 653/*
 654 * Check for a quiescent state from the current CPU.  When a task blocks,
 655 * the task is recorded in the corresponding CPU's rcu_node structure,
 656 * which is checked elsewhere.
 657 *
 658 * Caller must disable hard irqs.
 659 */
 660static void rcu_preempt_check_callbacks(int cpu)
 661{
 662	struct task_struct *t = current;
 663
 664	if (t->rcu_read_lock_nesting == 0) {
 665		rcu_preempt_qs(cpu);
 666		return;
 667	}
 668	if (t->rcu_read_lock_nesting > 0 &&
 669	    per_cpu(rcu_preempt_data, cpu).qs_pending)
 670		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 671}
 672
 673#ifdef CONFIG_RCU_BOOST
 674
 675static void rcu_preempt_do_callbacks(void)
 676{
 677	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 678}
 679
 680#endif /* #ifdef CONFIG_RCU_BOOST */
 681
 682/*
 683 * Queue a preemptible-RCU callback for invocation after a grace period.
 684 */
 685void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 686{
 687	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
 688}
 689EXPORT_SYMBOL_GPL(call_rcu);
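/*
 * A minimal call_rcu() sketch (illustrative only; struct foo and
 * foo_reclaim() are hypothetical).  The callback runs only after all
 * readers that might still hold a reference have left their RCU
 * read-side critical sections:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * Then, after unlinking fp from all reader-visible structures:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */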
 690
 691/*
 692 * Queue an RCU callback for lazy invocation after a grace period.
 693 * This will likely be later named something like "call_rcu_lazy()",
 694 * but this change will require some way of tagging the lazy RCU
 695 * callbacks in the list of pending callbacks.  Until then, this
 696 * function may only be called from __kfree_rcu().
 697 */
 698void kfree_call_rcu(struct rcu_head *head,
 699		    void (*func)(struct rcu_head *rcu))
 700{
 701	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
 702}
 703EXPORT_SYMBOL_GPL(kfree_call_rcu);
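/*
 * Callers normally reach kfree_call_rcu() through the kfree_rcu()
 * macro, which computes the rcu_head offset at compile time.  Reusing
 * the hypothetical struct foo from the call_rcu() sketch above:
 *
 *	kfree_rcu(fp, rcu);
 *
 * This behaves like call_rcu() with a callback that just kfree()s the
 * enclosing structure, but, as the "lazy" argument above shows, such
 * callbacks may be batched more aggressively to save power.
 */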
 704
 705/**
 706 * synchronize_rcu - wait until a grace period has elapsed.
 707 *
 708 * Control will return to the caller some time after a full grace
 709 * period has elapsed, in other words after all currently executing RCU
 710 * read-side critical sections have completed.  Note, however, that
 711 * upon return from synchronize_rcu(), the caller might well be executing
 712 * concurrently with new RCU read-side critical sections that began while
 713 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 714 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 715 *
 716 * See the description of synchronize_sched() for more detailed information
 717 * on memory ordering guarantees.
 718 */
 719void synchronize_rcu(void)
 720{
 721	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
 722			   !lock_is_held(&rcu_lock_map) &&
 723			   !lock_is_held(&rcu_sched_lock_map),
 724			   "Illegal synchronize_rcu() in RCU read-side critical section");
 725	if (!rcu_scheduler_active)
 726		return;
 727	if (rcu_expedited)
 728		synchronize_rcu_expedited();
 729	else
 730		wait_rcu_gp(call_rcu);
 731}
 732EXPORT_SYMBOL_GPL(synchronize_rcu);
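/*
 * A minimal updater/reader pairing mediated by synchronize_rcu()
 * (illustrative only; gp, struct foo, and do_something_with() are
 * hypothetical):
 *
 *	Updater:
 *		struct foo *old = gp;
 *
 *		rcu_assign_pointer(gp, new);
 *		synchronize_rcu();	(waits for pre-existing readers)
 *		kfree(old);
 *
 *	Reader:
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something_with(p->data);
 *		rcu_read_unlock();
 */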
 733
 734static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
 735static unsigned long sync_rcu_preempt_exp_count;
 736static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 737
 738/*
 739 * Return non-zero if there are any tasks in RCU read-side critical
 740 * sections blocking the current preemptible-RCU expedited grace period.
 741 * If there is no preemptible-RCU expedited grace period currently in
 742 * progress, returns zero unconditionally.
 743 */
 744static int rcu_preempted_readers_exp(struct rcu_node *rnp)
 745{
 746	return rnp->exp_tasks != NULL;
 747}
 748
 749/*
 750 * Return non-zero if there is no RCU expedited grace period in progress
 751 * for the specified rcu_node structure, in other words, if all CPUs and
 752 * tasks covered by the specified rcu_node structure have done their bit
 753 * for the current expedited grace period.  Works only for preemptible
 754 * RCU -- other RCU implementations use other means.
 755 *
 756 * Caller must hold sync_rcu_preempt_exp_mutex.
 757 */
 758static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 759{
 760	return !rcu_preempted_readers_exp(rnp) &&
 761	       ACCESS_ONCE(rnp->expmask) == 0;
 762}
 763
 764/*
 765 * Report the exit from RCU read-side critical section for the last task
 766 * that queued itself during or before the current expedited preemptible-RCU
 767 * grace period.  This event is reported either to the rcu_node structure on
 768 * which the task was queued or to one of that rcu_node structure's ancestors,
 769 * recursively up the tree.  (Calm down, calm down, we do the recursion
 770 * iteratively!)
 771 *
 772 * Most callers will set the "wake" flag, but the task initiating the
 773 * expedited grace period need not wake itself.
 774 *
 775 * Caller must hold sync_rcu_preempt_exp_mutex.
 776 */
 777static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 778			       bool wake)
 779{
 780	unsigned long flags;
 781	unsigned long mask;
 782
 783	raw_spin_lock_irqsave(&rnp->lock, flags);
 784	smp_mb__after_unlock_lock();
 785	for (;;) {
 786		if (!sync_rcu_preempt_exp_done(rnp)) {
 787			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 788			break;
 789		}
 790		if (rnp->parent == NULL) {
 791			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 792			if (wake) {
 793				smp_mb(); /* EGP done before wake_up(). */
 794				wake_up(&sync_rcu_preempt_exp_wq);
 795			}
 796			break;
 797		}
 798		mask = rnp->grpmask;
 799		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 800		rnp = rnp->parent;
 801		raw_spin_lock(&rnp->lock); /* irqs already disabled */
 802		smp_mb__after_unlock_lock();
 803		rnp->expmask &= ~mask;
 804	}
 805}
 806
 807/*
 808 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 809 * grace period for the specified rcu_node structure.  If there are no such
 810 * tasks, report it up the rcu_node hierarchy.
 811 *
 812 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 813 * CPU hotplug operations.
 814 */
 815static void
 816sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 817{
 818	unsigned long flags;
 819	int must_wait = 0;
 820
 821	raw_spin_lock_irqsave(&rnp->lock, flags);
 822	smp_mb__after_unlock_lock();
 823	if (list_empty(&rnp->blkd_tasks)) {
 824		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 825	} else {
 826		rnp->exp_tasks = rnp->blkd_tasks.next;
 827		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 828		must_wait = 1;
 829	}
 830	if (!must_wait)
 831		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 832}
 833
 834/**
 835 * synchronize_rcu_expedited - Brute-force RCU grace period
 836 *
 837 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 838 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 839 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 840 * significant time on all CPUs and is unfriendly to real-time workloads,
 841 * and is thus not recommended for any sort of common-case code.
 842 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 843 * please restructure your code to batch your updates, and then use a
 844 * single synchronize_rcu() instead.
 845 *
 846 * Note that it is illegal to call this function while holding any lock
 847 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 848 * to call this function from a CPU-hotplug notifier.  Failing to observe
 849 * these restrictions will result in deadlock.
 850 */
 851void synchronize_rcu_expedited(void)
 852{
 853	unsigned long flags;
 854	struct rcu_node *rnp;
 855	struct rcu_state *rsp = &rcu_preempt_state;
 856	unsigned long snap;
 857	int trycount = 0;
 858
 859	smp_mb(); /* Caller's modifications seen first by other CPUs. */
 860	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
 861	smp_mb(); /* Above access cannot bleed into critical section. */
 862
 863	/*
 864	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
 865	 * operation that finds an rcu_node structure with tasks in the
 866	 * process of being boosted will know that all tasks blocking
 867	 * this expedited grace period will already be in the process of
 868	 * being boosted.  This simplifies the process of moving tasks
 869	 * from leaf to root rcu_node structures.
 870	 */
 871	get_online_cpus();
 872
 873	/*
 874	 * Acquire lock, falling back to synchronize_rcu() if too many
 875	 * lock-acquisition failures.  Of course, if someone does the
 876	 * expedited grace period for us, just leave.
 877	 */
 878	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
 879		if (ULONG_CMP_LT(snap,
 880		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
 881			put_online_cpus();
 882			goto mb_ret; /* Others did our work for us. */
 883		}
 884		if (trycount++ < 10) {
 885			udelay(trycount * num_online_cpus());
 886		} else {
 887			put_online_cpus();
 888			wait_rcu_gp(call_rcu);
 889			return;
 890		}
 891	}
 892	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
 893		put_online_cpus();
 894		goto unlock_mb_ret; /* Others did our work for us. */
 895	}
 896
 897	/* Force all RCU readers onto ->blkd_tasks lists. */
 898	synchronize_sched_expedited();
 899
 900	/* Initialize ->expmask for all non-leaf rcu_node structures. */
 901	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
 902		raw_spin_lock_irqsave(&rnp->lock, flags);
 903		smp_mb__after_unlock_lock();
 904		rnp->expmask = rnp->qsmaskinit;
 905		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 906	}
 907
 908	/* Snapshot current state of ->blkd_tasks lists. */
 909	rcu_for_each_leaf_node(rsp, rnp)
 910		sync_rcu_preempt_exp_init(rsp, rnp);
 911	if (NUM_RCU_NODES > 1)
 912		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
 913
 914	put_online_cpus();
 915
 916	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 917	rnp = rcu_get_root(rsp);
 918	wait_event(sync_rcu_preempt_exp_wq,
 919		   sync_rcu_preempt_exp_done(rnp));
 920
 921	/* Clean up and exit. */
 922	smp_mb(); /* ensure expedited GP seen before counter increment. */
 923	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
 924unlock_mb_ret:
 925	mutex_unlock(&sync_rcu_preempt_exp_mutex);
 926mb_ret:
 927	smp_mb(); /* ensure subsequent action seen after grace period. */
 928}
 929EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
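/*
 * Sketch of the batching advice in the header comment above
 * (hypothetical update_one() over a hypothetical list).  Instead of:
 *
 *	list_for_each_entry(p, &head, list) {
 *		update_one(p);
 *		synchronize_rcu_expedited();
 *	}
 *
 * restructure so that a single grace period covers all the updates:
 *
 *	list_for_each_entry(p, &head, list)
 *		update_one(p);
 *	synchronize_rcu();
 */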
 930
 931/**
 932 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 933 *
 934 * Note that this primitive does not necessarily wait for an RCU grace period
 935 * to complete.  For example, if there are no RCU callbacks queued anywhere
 936 * in the system, then rcu_barrier() is within its rights to return
 937 * immediately, without waiting for anything, much less an RCU grace period.
 938 */
 939void rcu_barrier(void)
 940{
 941	_rcu_barrier(&rcu_preempt_state);
 942}
 943EXPORT_SYMBOL_GPL(rcu_barrier);
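/*
 * Typical use is at cleanup time, for example on module unload (a
 * sketch only; stop_posting_callbacks() stands in for whatever
 * mechanism prevents further call_rcu() invocations):
 *
 *	stop_posting_callbacks();
 *	rcu_barrier();			(wait for queued callbacks)
 *	free_supporting_data_structures();
 *
 * Note that synchronize_rcu() would not suffice here: it waits for a
 * grace period, not for invocation of previously queued callbacks.
 */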
 944
 945/*
 946 * Initialize preemptible RCU's state structures.
 947 */
 948static void __init __rcu_init_preempt(void)
 949{
 950	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 951}
 952
 953/*
 954 * Check for a task exiting while in a preemptible-RCU read-side
 955 * critical section, clean up if so.  No need to issue warnings,
 956 * as debug_check_no_locks_held() already does this if lockdep
 957 * is enabled.
 958 */
 959void exit_rcu(void)
 960{
 961	struct task_struct *t = current;
 962
 963	if (likely(list_empty(&current->rcu_node_entry)))
 964		return;
 965	t->rcu_read_lock_nesting = 1;
 966	barrier();
 967	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
 968	__rcu_read_unlock();
 969}
 970
 971#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 972
 973static struct rcu_state *rcu_state = &rcu_sched_state;
 974
 975/*
 976 * Tell them what RCU they are running.
 977 */
 978static void __init rcu_bootup_announce(void)
 979{
 980	pr_info("Hierarchical RCU implementation.\n");
 981	rcu_bootup_announce_oddness();
 982}
 983
 984/*
 985 * Return the number of RCU batches processed thus far for debug & stats.
 986 */
 987long rcu_batches_completed(void)
 988{
 989	return rcu_batches_completed_sched();
 990}
 991EXPORT_SYMBOL_GPL(rcu_batches_completed);
 992
 993/*
 994 * Force a quiescent state for RCU, which, because there is no preemptible
 995 * RCU, becomes the same as rcu-sched.
 996 */
 997void rcu_force_quiescent_state(void)
 998{
 999	rcu_sched_force_quiescent_state();
1000}
1001EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
1002
1003/*
1004 * Because preemptible RCU does not exist, we never have to check for
1005 * CPUs being in quiescent states.
1006 */
1007static void rcu_preempt_note_context_switch(int cpu)
1008{
1009}
1010
1011/*
1012 * Because preemptible RCU does not exist, there are never any preempted
1013 * RCU readers.
1014 */
1015static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
1016{
1017	return 0;
1018}
1019
1020#ifdef CONFIG_HOTPLUG_CPU
1021
1022/* Because preemptible RCU does not exist, no quieting of tasks. */
1023static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1024{
1025	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1026}
1027
1028#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1029
1030/*
1031 * Because preemptible RCU does not exist, we never have to check for
1032 * tasks blocked within RCU read-side critical sections.
1033 */
1034static void rcu_print_detail_task_stall(struct rcu_state *rsp)
1035{
1036}
1037
1038/*
1039 * Because preemptible RCU does not exist, we never have to check for
1040 * tasks blocked within RCU read-side critical sections.
1041 */
1042static int rcu_print_task_stall(struct rcu_node *rnp)
1043{
1044	return 0;
1045}
1046
1047/*
1048 * Because there is no preemptible RCU, there can be no readers blocked,
1049 * so there is no need to check for blocked tasks.  So check only for
1050 * bogus qsmask values.
1051 */
1052static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1053{
1054	WARN_ON_ONCE(rnp->qsmask);
1055}
1056
1057#ifdef CONFIG_HOTPLUG_CPU
1058
1059/*
1060 * Because preemptible RCU does not exist, it never needs to migrate
1061 * tasks that were blocked within RCU read-side critical sections, and
1062 * such non-existent tasks cannot possibly have been blocking the current
1063 * grace period.
1064 */
1065static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1066				     struct rcu_node *rnp,
1067				     struct rcu_data *rdp)
1068{
1069	return 0;
1070}
1071
1072#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1073
1074/*
1075 * Because preemptible RCU does not exist, it never has any callbacks
1076 * to check.
1077 */
1078static void rcu_preempt_check_callbacks(int cpu)
1079{
1080}
1081
1082/*
1083 * Queue an RCU callback for lazy invocation after a grace period.
1084 * This will likely be later named something like "call_rcu_lazy()",
1085 * but this change will require some way of tagging the lazy RCU
1086 * callbacks in the list of pending callbacks.  Until then, this
1087 * function may only be called from __kfree_rcu().
1088 *
1089 * Because there is no preemptible RCU, we use RCU-sched instead.
1090 */
1091void kfree_call_rcu(struct rcu_head *head,
1092		    void (*func)(struct rcu_head *rcu))
1093{
1094	__call_rcu(head, func, &rcu_sched_state, -1, 1);
1095}
1096EXPORT_SYMBOL_GPL(kfree_call_rcu);
1097
1098/*
1099 * Wait for an rcu-preempt grace period, but make it happen quickly.
1100 * But because preemptible RCU does not exist, map to rcu-sched.
1101 */
1102void synchronize_rcu_expedited(void)
1103{
1104	synchronize_sched_expedited();
1105}
1106EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1107
1108#ifdef CONFIG_HOTPLUG_CPU
1109
1110/*
1111 * Because preemptible RCU does not exist, there is never any need to
1112 * report on tasks preempted in RCU read-side critical sections during
1113 * expedited RCU grace periods.
1114 */
1115static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1116			       bool wake)
1117{
1118}
1119
1120#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1121
1122/*
1123 * Because preemptible RCU does not exist, rcu_barrier() is just
1124 * another name for rcu_barrier_sched().
1125 */
1126void rcu_barrier(void)
1127{
1128	rcu_barrier_sched();
1129}
1130EXPORT_SYMBOL_GPL(rcu_barrier);
1131
1132/*
1133 * Because preemptible RCU does not exist, it need not be initialized.
1134 */
1135static void __init __rcu_init_preempt(void)
1136{
1137}
1138
1139/*
1140 * Because preemptible RCU does not exist, tasks cannot possibly exit
1141 * while in preemptible RCU read-side critical sections.
1142 */
1143void exit_rcu(void)
1144{
1145}
1146
1147#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1148
1149#ifdef CONFIG_RCU_BOOST
1150
1151#include "../locking/rtmutex_common.h"
1152
1153#ifdef CONFIG_RCU_TRACE
1154
1155static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1156{
1157	if (list_empty(&rnp->blkd_tasks))
1158		rnp->n_balk_blkd_tasks++;
1159	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1160		rnp->n_balk_exp_gp_tasks++;
1161	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1162		rnp->n_balk_boost_tasks++;
1163	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1164		rnp->n_balk_notblocked++;
1165	else if (rnp->gp_tasks != NULL &&
1166		 ULONG_CMP_LT(jiffies, rnp->boost_time))
1167		rnp->n_balk_notyet++;
1168	else
1169		rnp->n_balk_nos++;
1170}
1171
1172#else /* #ifdef CONFIG_RCU_TRACE */
1173
1174static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1175{
1176}
1177
1178#endif /* #else #ifdef CONFIG_RCU_TRACE */
1179
1180static void rcu_wake_cond(struct task_struct *t, int status)
1181{
1182	/*
1183	 * If the thread is yielding, only wake it when this
1184	 * is invoked from idle.
1185	 */
1186	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
1187		wake_up_process(t);
1188}
1189
1190/*
1191 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1192 * or ->boost_tasks, advancing the pointer to the next task in the
1193 * ->blkd_tasks list.
1194 *
1195 * Note that irqs must be enabled: boosting the task can block.
1196 * Returns 1 if there are more tasks needing to be boosted.
1197 */
1198static int rcu_boost(struct rcu_node *rnp)
1199{
1200	unsigned long flags;
1201	struct rt_mutex mtx;
1202	struct task_struct *t;
1203	struct list_head *tb;
1204
1205	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1206		return 0;  /* Nothing left to boost. */
1207
1208	raw_spin_lock_irqsave(&rnp->lock, flags);
1209	smp_mb__after_unlock_lock();
1210
1211	/*
1212	 * Recheck under the lock: all tasks in need of boosting
1213	 * might exit their RCU read-side critical sections on their own.
1214	 */
1215	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1216		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1217		return 0;
1218	}
1219
1220	/*
1221	 * Preferentially boost tasks blocking expedited grace periods.
1222	 * This cannot starve the normal grace periods because a second
1223	 * expedited grace period must boost all blocked tasks, including
1224	 * those blocking the pre-existing normal grace period.
1225	 */
1226	if (rnp->exp_tasks != NULL) {
1227		tb = rnp->exp_tasks;
1228		rnp->n_exp_boosts++;
1229	} else {
1230		tb = rnp->boost_tasks;
1231		rnp->n_normal_boosts++;
1232	}
1233	rnp->n_tasks_boosted++;
1234
1235	/*
1236	 * We boost task t by manufacturing an rt_mutex that appears to
1237	 * be held by task t.  We leave a pointer to that rt_mutex where
1238	 * task t can find it, and task t will release the mutex when it
1239	 * exits its outermost RCU read-side critical section.  Then
1240	 * simply acquiring this artificial rt_mutex will boost task
1241	 * t's priority.  (Thanks to tglx for suggesting this approach!)
1242	 *
1243	 * Note that task t must acquire rnp->lock to remove itself from
1244	 * the ->blkd_tasks list, which it will do from exit() if from
1245	 * nowhere else.  We therefore are guaranteed that task t will
1246	 * stay around at least until we drop rnp->lock.  Note that
1247	 * rnp->lock also resolves races between our priority boosting
1248	 * and task t's exiting its outermost RCU read-side critical
1249	 * section.
1250	 */
1251	t = container_of(tb, struct task_struct, rcu_node_entry);
1252	rt_mutex_init_proxy_locked(&mtx, t);
1253	t->rcu_boost_mutex = &mtx;
1254	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1255	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
1256	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
1257
1258	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1259	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
1260}
1261
1262/*
1263 * Priority-boosting kthread.  One per leaf rcu_node and one for the
1264 * root rcu_node.
1265 */
1266static int rcu_boost_kthread(void *arg)
1267{
1268	struct rcu_node *rnp = (struct rcu_node *)arg;
1269	int spincnt = 0;
1270	int more2boost;
1271
1272	trace_rcu_utilization(TPS("Start boost kthread@init"));
1273	for (;;) {
1274		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1275		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1276		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1277		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1278		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1279		more2boost = rcu_boost(rnp);
1280		if (more2boost)
1281			spincnt++;
1282		else
1283			spincnt = 0;
1284		if (spincnt > 10) {
1285			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1286			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1287			schedule_timeout_interruptible(2);
1288			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1289			spincnt = 0;
1290		}
1291	}
1292	/* NOTREACHED */
1293	trace_rcu_utilization(TPS("End boost kthread@notreached"));
1294	return 0;
1295}
1296
1297/*
1298 * Check to see if it is time to start boosting RCU readers that are
1299 * blocking the current grace period, and, if so, tell the per-rcu_node
1300 * kthread to start boosting them.  If there is an expedited grace
1301 * period in progress, it is always time to boost.
1302 *
1303 * The caller must hold rnp->lock, which this function releases.
1304 * The ->boost_kthread_task is immortal, so we don't need to worry
1305 * about it going away.
1306 */
1307static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1308{
1309	struct task_struct *t;
1310
1311	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1312		rnp->n_balk_exp_gp_tasks++;
1313		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1314		return;
1315	}
1316	if (rnp->exp_tasks != NULL ||
1317	    (rnp->gp_tasks != NULL &&
1318	     rnp->boost_tasks == NULL &&
1319	     rnp->qsmask == 0 &&
1320	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1321		if (rnp->exp_tasks == NULL)
1322			rnp->boost_tasks = rnp->gp_tasks;
1323		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1324		t = rnp->boost_kthread_task;
1325		if (t)
1326			rcu_wake_cond(t, rnp->boost_kthread_status);
1327	} else {
1328		rcu_initiate_boost_trace(rnp);
1329		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1330	}
1331}
1332
1333/*
1334 * Wake up the per-CPU kthread to invoke RCU callbacks.
1335 */
1336static void invoke_rcu_callbacks_kthread(void)
1337{
1338	unsigned long flags;
1339
1340	local_irq_save(flags);
1341	__this_cpu_write(rcu_cpu_has_work, 1);
1342	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1343	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
1344		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1345			      __this_cpu_read(rcu_cpu_kthread_status));
1346	}
1347	local_irq_restore(flags);
1348}
1349
1350/*
1351 * Is the current CPU running the RCU-callbacks kthread?
1352 * Caller must have preemption disabled.
1353 */
1354static bool rcu_is_callbacks_kthread(void)
1355{
1356	return __this_cpu_read(rcu_cpu_kthread_task) == current;
1357}
1358
1359#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
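/*
 * Worked example: with the Kconfig default CONFIG_RCU_BOOST_DELAY=500
 * (milliseconds) and HZ=250, this is DIV_ROUND_UP(500 * 250, 1000) ==
 * 125 jiffies, so boosting begins half a second into a grace period.
 */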
1360
1361/*
1362 * Do priority-boost accounting for the start of a new grace period.
1363 */
1364static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1365{
1366	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1367}
1368
1369/*
1370 * Create an RCU-boost kthread for the specified node if one does not
1371 * already exist.  We only create this kthread for preemptible RCU.
1372 * Returns zero if all is well, a negated errno otherwise.
1373 */
1374static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1375						 struct rcu_node *rnp)
1376{
1377	int rnp_index = rnp - &rsp->node[0];
1378	unsigned long flags;
1379	struct sched_param sp;
1380	struct task_struct *t;
1381
1382	if (&rcu_preempt_state != rsp)
1383		return 0;
1384
1385	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1386		return 0;
1387
1388	rsp->boost = 1;
1389	if (rnp->boost_kthread_task != NULL)
1390		return 0;
1391	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1392			   "rcub/%d", rnp_index);
1393	if (IS_ERR(t))
1394		return PTR_ERR(t);
1395	raw_spin_lock_irqsave(&rnp->lock, flags);
1396	smp_mb__after_unlock_lock();
1397	rnp->boost_kthread_task = t;
1398	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1399	sp.sched_priority = RCU_BOOST_PRIO;
1400	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1401	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1402	return 0;
1403}
1404
1405static void rcu_kthread_do_work(void)
1406{
1407	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1408	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1409	rcu_preempt_do_callbacks();
1410}
1411
1412static void rcu_cpu_kthread_setup(unsigned int cpu)
1413{
1414	struct sched_param sp;
1415
1416	sp.sched_priority = RCU_KTHREAD_PRIO;
1417	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1418}
1419
1420static void rcu_cpu_kthread_park(unsigned int cpu)
1421{
1422	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1423}
1424
1425static int rcu_cpu_kthread_should_run(unsigned int cpu)
1426{
1427	return __this_cpu_read(rcu_cpu_has_work);
1428}
1429
1430/*
1431 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1432 * RCU softirq used in flavors and configurations of RCU that do not
1433 * support RCU priority boosting.
1434 */
1435static void rcu_cpu_kthread(unsigned int cpu)
1436{
1437	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1438	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1439	int spincnt;
1440
1441	for (spincnt = 0; spincnt < 10; spincnt++) {
1442		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1443		local_bh_disable();
1444		*statusp = RCU_KTHREAD_RUNNING;
1445		this_cpu_inc(rcu_cpu_kthread_loops);
1446		local_irq_disable();
1447		work = *workp;
1448		*workp = 0;
1449		local_irq_enable();
1450		if (work)
1451			rcu_kthread_do_work();
1452		local_bh_enable();
1453		if (*workp == 0) {
1454			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1455			*statusp = RCU_KTHREAD_WAITING;
1456			return;
1457		}
1458	}
1459	*statusp = RCU_KTHREAD_YIELDING;
1460	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1461	schedule_timeout_interruptible(2);
1462	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1463	*statusp = RCU_KTHREAD_WAITING;
1464}
1465
1466/*
1467 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1468 * served by the rcu_node in question.  The CPU hotplug lock is still
1469 * held, so the value of rnp->qsmaskinit will be stable.
1470 *
1471 * We don't include outgoingcpu in the affinity set; callers pass -1 if
1472 * there is no outgoing CPU.  If there are no CPUs left in the affinity
1473 * set, this function allows the kthread to execute on any CPU.
1474 */
1475static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1476{
1477	struct task_struct *t = rnp->boost_kthread_task;
1478	unsigned long mask = rnp->qsmaskinit;
1479	cpumask_var_t cm;
1480	int cpu;
1481
1482	if (!t)
1483		return;
1484	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1485		return;
1486	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1487		if ((mask & 0x1) && cpu != outgoingcpu)
1488			cpumask_set_cpu(cpu, cm);
1489	if (cpumask_weight(cm) == 0) {
1490		cpumask_setall(cm);
1491		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1492			cpumask_clear_cpu(cpu, cm);
1493		WARN_ON_ONCE(cpumask_weight(cm) == 0);
1494	}
1495	set_cpus_allowed_ptr(t, cm);
1496	free_cpumask_var(cm);
1497}
1498
1499static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1500	.store			= &rcu_cpu_kthread_task,
1501	.thread_should_run	= rcu_cpu_kthread_should_run,
1502	.thread_fn		= rcu_cpu_kthread,
1503	.thread_comm		= "rcuc/%u",
1504	.setup			= rcu_cpu_kthread_setup,
1505	.park			= rcu_cpu_kthread_park,
1506};
1507
1508/*
1509 * Spawn all kthreads -- called as soon as the scheduler is running.
1510 */
1511static int __init rcu_spawn_kthreads(void)
1512{
1513	struct rcu_node *rnp;
1514	int cpu;
1515
1516	rcu_scheduler_fully_active = 1;
1517	for_each_possible_cpu(cpu)
1518		per_cpu(rcu_cpu_has_work, cpu) = 0;
1519	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1520	rnp = rcu_get_root(rcu_state);
1521	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1522	if (NUM_RCU_NODES > 1) {
1523		rcu_for_each_leaf_node(rcu_state, rnp)
1524			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1525	}
1526	return 0;
1527}
1528early_initcall(rcu_spawn_kthreads);
1529
1530static void rcu_prepare_kthreads(int cpu)
1531{
1532	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1533	struct rcu_node *rnp = rdp->mynode;
1534
1535	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1536	if (rcu_scheduler_fully_active)
1537		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1538}
1539
1540#else /* #ifdef CONFIG_RCU_BOOST */
1541
1542static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1543{
1544	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1545}
1546
1547static void invoke_rcu_callbacks_kthread(void)
1548{
1549	WARN_ON_ONCE(1);
1550}
1551
1552static bool rcu_is_callbacks_kthread(void)
1553{
1554	return false;
1555}
1556
1557static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1558{
1559}
1560
1561static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1562{
1563}
1564
1565static int __init rcu_scheduler_really_started(void)
1566{
1567	rcu_scheduler_fully_active = 1;
1568	return 0;
1569}
1570early_initcall(rcu_scheduler_really_started);
1571
1572static void rcu_prepare_kthreads(int cpu)
1573{
1574}
1575
1576#endif /* #else #ifdef CONFIG_RCU_BOOST */
1577
1578#if !defined(CONFIG_RCU_FAST_NO_HZ)
1579
1580/*
1581 * Check to see if any future RCU-related work will need to be done
1582 * by the current CPU, even if none need be done immediately, returning
1583 * 1 if so.  This function is part of the RCU implementation; it is -not-
1584 * an exported member of the RCU API.
1585 *
1586 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1587 * any flavor of RCU.
1588 */
1589#ifndef CONFIG_RCU_NOCB_CPU_ALL
1590int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1591{
1592	*delta_jiffies = ULONG_MAX;
1593	return rcu_cpu_has_callbacks(cpu, NULL);
1594}
1595#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1596
1597/*
1598 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1599 * after it.
1600 */
1601static void rcu_cleanup_after_idle(int cpu)
1602{
1603}
1604
1605/*
1606 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1607 * is nothing.
1608 */
1609static void rcu_prepare_for_idle(int cpu)
1610{
1611}
1612
1613/*
1614 * Don't bother keeping a running count of the number of RCU callbacks
1615 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1616 */
1617static void rcu_idle_count_callbacks_posted(void)
1618{
1619}
1620
1621#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1622
1623/*
1624 * This code is invoked when a CPU goes idle, at which point we want
1625 * to have the CPU do everything required for RCU so that it can enter
1626 * the energy-efficient dyntick-idle mode.  This is handled by a
1627 * state machine implemented by rcu_prepare_for_idle() below.
1628 *
1629 * The following two preprocessor symbols control this state machine:
1630 *
1631 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1632 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
1633 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
1634 *	benchmarkers who might otherwise be tempted to set this to a large
1635 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1636 *	system.  And if you are -that- concerned about energy efficiency,
1637 *	just power the system down and be done with it!
1638 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1639 *	permitted to sleep in dyntick-idle mode with only lazy RCU
1640 *	callbacks pending.  Setting this too high can OOM your system.
1641 *
1642 * The values below work well in practice.  If future workloads require
1643 * adjustment, they can be converted into kernel config parameters, though
1644 * making the state machine smarter might be a better option.
1645 */
1646#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
1647#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
1648
1649static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1650module_param(rcu_idle_gp_delay, int, 0644);
1651static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1652module_param(rcu_idle_lazy_gp_delay, int, 0644);
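/*
 * Note: because both delays are exposed via module_param() with mode
 * 0644, they can be adjusted at runtime through sysfs as well as at
 * boot, which is convenient when experimenting with the
 * energy-efficiency trade-offs described above.
 */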
1653
1654extern int tick_nohz_active;
1655
1656/*
1657 * Try to advance callbacks for all flavors of RCU on the current CPU, but
1658 * only if it has been a while since the last time we did so.  Afterwards,
1659 * if there are any callbacks ready for immediate invocation, return true.
1660 */
1661static bool __maybe_unused rcu_try_advance_all_cbs(void)
1662{
1663	bool cbs_ready = false;
1664	struct rcu_data *rdp;
1665	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1666	struct rcu_node *rnp;
1667	struct rcu_state *rsp;
1668
1669	/* Exit early if we advanced recently. */
1670	if (jiffies == rdtp->last_advance_all)
1671		return false;
1672	rdtp->last_advance_all = jiffies;
1673
1674	for_each_rcu_flavor(rsp) {
1675		rdp = this_cpu_ptr(rsp->rda);
1676		rnp = rdp->mynode;
1677
1678		/*
1679		 * Don't bother checking unless a grace period has
1680		 * completed since we last checked and there are
1681		 * callbacks not yet ready to invoke.
1682		 */
1683		if (rdp->completed != rnp->completed &&
1684		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1685			note_gp_changes(rsp, rdp);
1686
1687		if (cpu_has_callbacks_ready_to_invoke(rdp))
1688			cbs_ready = true;
1689	}
1690	return cbs_ready;
1691}
1692
1693/*
1694 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1695 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1696 * caller to set the timeout based on whether or not there are non-lazy
1697 * callbacks.
1698 *
1699 * The caller must have disabled interrupts.
1700 */
1701#ifndef CONFIG_RCU_NOCB_CPU_ALL
1702int rcu_needs_cpu(int cpu, unsigned long *dj)
1703{
1704	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1705
1706	/* Snapshot to detect later posting of non-lazy callback. */
1707	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1708
1709	/* If no callbacks, RCU doesn't need the CPU. */
1710	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
1711		*dj = ULONG_MAX;
1712		return 0;
1713	}
1714
1715	/* Attempt to advance callbacks. */
1716	if (rcu_try_advance_all_cbs()) {
1717		/* Some ready to invoke, so initiate later invocation. */
1718		invoke_rcu_core();
1719		return 1;
1720	}
1721	rdtp->last_accelerate = jiffies;
1722
1723	/* Request timer delay depending on laziness, and round. */
1724	if (!rdtp->all_lazy) {
1725		*dj = round_up(rcu_idle_gp_delay + jiffies,
1726			       rcu_idle_gp_delay) - jiffies;
1727	} else {
1728		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1729	}
1730	return 0;
1731}
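/*
 * A worked example of the rounding above: with rcu_idle_gp_delay = 4
 * and jiffies = 1001, round_up(4 + 1001, 4) = 1008, so *dj = 7 jiffies.
 * Rounding the wakeup to a multiple of rcu_idle_gp_delay (or to a
 * whole second via round_jiffies() in the all-lazy case) makes the
 * wakeups of multiple idle CPUs tend to coincide, reducing the number
 * of distinct timer events.
 */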
1732#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1733
1734/*
1735 * Prepare a CPU for idle from an RCU perspective.  The first major task
1736 * is to sense whether nohz mode has been enabled or disabled via sysfs.
1737 * The second major task is to check to see if a non-lazy callback has
1738 * arrived at a CPU that previously had only lazy callbacks.  The third
1739 * major task is to accelerate (that is, assign grace-period numbers to)
1740 * any recently arrived callbacks.
1741 *
1742 * The caller must have disabled interrupts.
1743 */
1744static void rcu_prepare_for_idle(int cpu)
1745{
1746#ifndef CONFIG_RCU_NOCB_CPU_ALL
1747	struct rcu_data *rdp;
1748	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1749	struct rcu_node *rnp;
1750	struct rcu_state *rsp;
1751	int tne;
1752
1753	/* Handle nohz enablement switches conservatively. */
1754	tne = ACCESS_ONCE(tick_nohz_active);
1755	if (tne != rdtp->tick_nohz_enabled_snap) {
1756		if (rcu_cpu_has_callbacks(cpu, NULL))
1757			invoke_rcu_core(); /* force nohz to see update. */
1758		rdtp->tick_nohz_enabled_snap = tne;
1759		return;
1760	}
1761	if (!tne)
1762		return;
1763
1764	/* If this is a no-CBs CPU, no callbacks, just return. */
1765	if (rcu_is_nocb_cpu(cpu))
1766		return;
1767
1768	/*
1769	 * If a non-lazy callback arrived at a CPU having only lazy
1770	 * callbacks, invoke RCU core for the side-effect of recalculating
1771	 * idle duration on re-entry to idle.
1772	 */
1773	if (rdtp->all_lazy &&
1774	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1775		rdtp->all_lazy = false;
1776		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1777		invoke_rcu_core();
1778		return;
1779	}
1780
1781	/*
1782	 * If we have not yet accelerated this jiffy, accelerate all
1783	 * callbacks on this CPU.
1784	 */
1785	if (rdtp->last_accelerate == jiffies)
1786		return;
1787	rdtp->last_accelerate = jiffies;
1788	for_each_rcu_flavor(rsp) {
1789		rdp = per_cpu_ptr(rsp->rda, cpu);
1790		if (!*rdp->nxttail[RCU_DONE_TAIL])
1791			continue;
1792		rnp = rdp->mynode;
1793		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1794		smp_mb__after_unlock_lock();
1795		rcu_accelerate_cbs(rsp, rnp, rdp);
1796		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1797	}
1798#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1799}
1800
1801/*
1802 * Clean up for exit from idle.  Attempt to advance callbacks based on
1803 * any grace periods that elapsed while the CPU was idle, and if any
1804 * callbacks are now ready to invoke, initiate invocation.
1805 */
1806static void rcu_cleanup_after_idle(int cpu)
1807{
1808#ifndef CONFIG_RCU_NOCB_CPU_ALL
1809	if (rcu_is_nocb_cpu(cpu))
1810		return;
1811	if (rcu_try_advance_all_cbs())
1812		invoke_rcu_core();
1813#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1814}
1815
1816/*
1817 * Keep a running count of the number of non-lazy callbacks posted
1818 * on this CPU.  This running counter (which is never decremented) allows
1819 * rcu_prepare_for_idle() to detect when something out of the idle loop
1820 * posts a callback, even if an equal number of callbacks are invoked.
1821 * Of course, callbacks should only be posted from within a trace event
1822 * designed to be called from idle or from within RCU_NONIDLE().
1823 */
1824static void rcu_idle_count_callbacks_posted(void)
1825{
1826	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1827}
1828
1829/*
1830 * Data for flushing lazy RCU callbacks at OOM time.
1831 */
1832static atomic_t oom_callback_count;
1833static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1834
1835/*
1836 * RCU OOM callback -- decrement the outstanding count and deliver the
1837 * wake-up if we are the last one.
1838 */
1839static void rcu_oom_callback(struct rcu_head *rhp)
1840{
1841	if (atomic_dec_and_test(&oom_callback_count))
1842		wake_up(&oom_callback_wq);
1843}
1844
1845/*
1846 * Post an rcu_oom_notify callback on the current CPU if it has at
1847 * least one lazy callback.  This will unnecessarily post callbacks
1848 * to CPUs that already have a non-lazy callback at the end of their
1849 * callback list, but this is an infrequent operation, so accept some
1850 * extra overhead to keep things simple.
1851 */
1852static void rcu_oom_notify_cpu(void *unused)
1853{
1854	struct rcu_state *rsp;
1855	struct rcu_data *rdp;
1856
1857	for_each_rcu_flavor(rsp) {
1858		rdp = __this_cpu_ptr(rsp->rda);
1859		if (rdp->qlen_lazy != 0) {
1860			atomic_inc(&oom_callback_count);
1861			rsp->call(&rdp->oom_head, rcu_oom_callback);
1862		}
1863	}
1864}
1865
1866/*
1867 * If low on memory, ensure that each CPU has a non-lazy callback.
1868 * This will wake up CPUs that have only lazy callbacks, in turn
1869 * ensuring that they free up the corresponding memory in a timely manner.
1870 * Because an uncertain amount of memory will be freed in some uncertain
1871 * timeframe, we do not claim to have freed anything.
1872 */
1873static int rcu_oom_notify(struct notifier_block *self,
1874			  unsigned long notused, void *nfreed)
1875{
1876	int cpu;
1877
1878	/* Wait for callbacks from earlier instance to complete. */
1879	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1880	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1881
1882	/*
1883	 * Prevent premature wakeup: ensure that all increments happen
1884	 * before there is a chance of the counter reaching zero.
1885	 */
1886	atomic_set(&oom_callback_count, 1);
1887
1888	get_online_cpus();
1889	for_each_online_cpu(cpu) {
1890		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1891		cond_resched();
1892	}
1893	put_online_cpus();
1894
1895	/* Unconditionally decrement: no need to wake ourselves up. */
1896	atomic_dec(&oom_callback_count);
1897
1898	return NOTIFY_OK;
1899}
1900
1901static struct notifier_block rcu_oom_nb = {
1902	.notifier_call = rcu_oom_notify
1903};
1904
1905static int __init rcu_register_oom_notifier(void)
1906{
1907	register_oom_notifier(&rcu_oom_nb);
1908	return 0;
1909}
1910early_initcall(rcu_register_oom_notifier);
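/*
 * Taken together, the pieces above work as follows: rcu_oom_notify()
 * posts rcu_oom_callback() on each CPU that has lazy callbacks queued,
 * and the presence of that non-lazy callback causes rcu_needs_cpu()
 * above to choose the short rcu_idle_gp_delay timeout rather than the
 * long rcu_idle_lazy_gp_delay timeout, so that memory awaiting
 * kfree_rcu() is freed sooner.
 */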
1911
1912#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1913
1914#ifdef CONFIG_RCU_CPU_STALL_INFO
1915
1916#ifdef CONFIG_RCU_FAST_NO_HZ
1917
1918static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1919{
1920	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1921	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1922
1923	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1924		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1925		ulong2long(nlpd),
1926		rdtp->all_lazy ? 'L' : '.',
1927		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1928}
1929
1930#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1931
1932static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1933{
1934	*cp = '\0';
1935}
1936
1937#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1938
1939/* Initiate the stall-info list. */
1940static void print_cpu_stall_info_begin(void)
1941{
1942	pr_cont("\n");
1943}
1944
1945/*
1946 * Print out diagnostic information for the specified stalled CPU.
1947 *
1948 * If the specified CPU is aware of the current RCU grace period
1949 * (flavor specified by rsp), then print the number of scheduling
1950 * clock interrupts the CPU has taken during the time that it has
1951 * been aware.  Otherwise, print the number of RCU grace periods
1952 * that this CPU is ignorant of, for example, "1" if the CPU was
1953 * aware of the previous grace period.
1954 *
1955 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1956 */
1957static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1958{
1959	char fast_no_hz[72];
1960	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1961	struct rcu_dynticks *rdtp = rdp->dynticks;
1962	char *ticks_title;
1963	unsigned long ticks_value;
1964
1965	if (rsp->gpnum == rdp->gpnum) {
1966		ticks_title = "ticks this GP";
1967		ticks_value = rdp->ticks_this_gp;
1968	} else {
1969		ticks_title = "GPs behind";
1970		ticks_value = rsp->gpnum - rdp->gpnum;
1971	}
1972	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1973	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
1974	       cpu, ticks_value, ticks_title,
1975	       atomic_read(&rdtp->dynticks) & 0xfff,
1976	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1977	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1978	       fast_no_hz);
1979}
1980
1981/* Terminate the stall-info list. */
1982static void print_cpu_stall_info_end(void)
1983{
1984	pr_err("\t");
1985}
1986
1987/* Zero ->ticks_this_gp for all flavors of RCU. */
1988static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1989{
1990	rdp->ticks_this_gp = 0;
1991	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1992}
1993
1994/* Increment ->ticks_this_gp for all flavors of RCU. */
1995static void increment_cpu_stall_ticks(void)
1996{
1997	struct rcu_state *rsp;
1998
1999	for_each_rcu_flavor(rsp)
2000		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
2001}
2002
2003#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2004
2005static void print_cpu_stall_info_begin(void)
2006{
2007	pr_cont(" {");
2008}
2009
2010static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2011{
2012	pr_cont(" %d", cpu);
2013}
2014
2015static void print_cpu_stall_info_end(void)
2016{
2017	pr_cont("} ");
2018}
2019
2020static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2021{
2022}
2023
2024static void increment_cpu_stall_ticks(void)
2025{
2026}
2027
2028#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
2029
2030#ifdef CONFIG_RCU_NOCB_CPU
2031
2032/*
2033 * Offload callback processing from the boot-time-specified set of CPUs
2034 * specified by rcu_nocb_mask.  For each CPU in the set, there is a
2035 * kthread created that pulls the callbacks from the corresponding CPU,
2036 * waits for a grace period to elapse, and invokes the callbacks.
2037 * The no-CBs CPUs do a wake_up() on their kthread when they insert
2038 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
2039 * has been specified, in which case each kthread actively polls its
2040 * CPU.  (Which isn't so great for energy efficiency, but which does
2041 * reduce RCU's overhead on that CPU.)
2042 *
2043 * This is intended to be used in conjunction with Frederic Weisbecker's
2044 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
2045 * running CPU-bound user-mode computations.
2046 *
2047 * Offloading of callback processing could also in theory be used as
2048 * an energy-efficiency measure because CPUs with no RCU callbacks
2049 * queued are more aggressive about entering dyntick-idle mode.
2050 */
2051
2052
2053/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
2054static int __init rcu_nocb_setup(char *str)
2055{
2056	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2057	have_rcu_nocb_mask = true;
2058	cpulist_parse(str, rcu_nocb_mask);
2059	return 1;
2060}
2061__setup("rcu_nocbs=", rcu_nocb_setup);
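/*
 * For example, booting with "rcu_nocbs=1-3,8" offloads callback
 * processing for CPUs 1, 2, 3, and 8.  The argument is a standard
 * kernel cpulist as understood by cpulist_parse() above.
 */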
2062
2063static int __init parse_rcu_nocb_poll(char *arg)
2064{
2065	rcu_nocb_poll = true;
2066	return 0;
2067}
2068early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
2069
2070/*
2071 * Do any no-CBs CPUs need another grace period?
2072 *
2073 * Interrupts must be disabled.  If the caller does not hold the root
2074 * rcu_node structure's ->lock, the results are advisory only.
2075 */
2076static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2077{
2078	struct rcu_node *rnp = rcu_get_root(rsp);
2079
2080	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
2081}
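/*
 * Note on the indexing above: ->need_future_gp[] has two elements
 * indexed by grace-period parity.  Because ->completed simply
 * increments, (completed + 1) & 0x1 selects the element tracking
 * requests for the next grace period; for example, when ->completed
 * is 6, element 1 covers grace period 7.
 */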
2082
2083/*
2084 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2085 * grace period.
2086 */
2087static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2088{
2089	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
2090}
2091
2092/*
2093 * Set the root rcu_node structure's ->need_future_gp field
2094 * based on the sum of those of all rcu_node structures.  This does
2095 * double-count the root rcu_node structure's requests, but this
2096 * is necessary to handle the possibility of a rcu_nocb_kthread()
2097 * having awakened during the time that the rcu_node structures
2098 * were being updated for the end of the previous grace period.
2099 */
2100static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2101{
2102	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
2103}
2104
2105static void rcu_init_one_nocb(struct rcu_node *rnp)
2106{
2107	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2108	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
2109}
2110
2111#ifndef CONFIG_RCU_NOCB_CPU_ALL
2112/* Is the specified CPU a no-CBs CPU? */
2113bool rcu_is_nocb_cpu(int cpu)
2114{
2115	if (have_rcu_nocb_mask)
2116		return cpumask_test_cpu(cpu, rcu_nocb_mask);
2117	return false;
2118}
2119#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
2120
2121/*
2122 * Enqueue the specified string of rcu_head structures onto the specified
2123 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
2124 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
2125 * counts are supplied by rhcount and rhcount_lazy.
2126 *
2127 * If warranted, also wake up the kthread servicing this CPU's queues.
2128 */
2129static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2130				    struct rcu_head *rhp,
2131				    struct rcu_head **rhtp,
2132				    int rhcount, int rhcount_lazy,
2133				    unsigned long flags)
2134{
2135	int len;
2136	struct rcu_head **old_rhpp;
2137	struct task_struct *t;
2138
2139	/* Enqueue the callback on the nocb list and update counts. */
2140	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2141	ACCESS_ONCE(*old_rhpp) = rhp;
2142	atomic_long_add(rhcount, &rdp->nocb_q_count);
2143	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2144
2145	/* If we are not being polled and there is a kthread, awaken it ... */
2146	t = ACCESS_ONCE(rdp->nocb_kthread);
2147	if (rcu_nocb_poll || !t) {
2148		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2149				    TPS("WakeNotPoll"));
2150		return;
2151	}
2152	len = atomic_long_read(&rdp->nocb_q_count);
2153	if (old_rhpp == &rdp->nocb_head) {
2154		if (!irqs_disabled_flags(flags)) {
2155			wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
2156			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2157					    TPS("WakeEmpty"));
2158		} else {
2159			rdp->nocb_defer_wakeup = true;
2160			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2161					    TPS("WakeEmptyIsDeferred"));
2162		}
2163		rdp->qlen_last_fqs_check = 0;
2164	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
2165		wake_up_process(t); /* ... or if many callbacks queued. */
2166		rdp->qlen_last_fqs_check = LONG_MAX / 2;
2167		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
2168	} else {
2169		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
2170	}
2171	return;
2172}
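/*
 * To summarize the wakeup policy above: an enqueue onto an empty list
 * wakes the kthread immediately (or defers the wakeup when interrupts
 * are disabled), an enqueue that pushes the queue length past
 * qlen_last_fqs_check + qhimark wakes it and suppresses further
 * overflow wakeups for this burst, and all other enqueues leave the
 * kthread alone.
 */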
2173
2174/*
2175 * This is a helper for __call_rcu(), which invokes this when the normal
2176 * callback queue is inoperable.  If this is not a no-CBs CPU, this
2177 * function returns failure back to __call_rcu(), which can complain
2178 * appropriately.
2179 *
2180 * Otherwise, this function queues the callback where the corresponding
2181 * "rcuo" kthread can find it.
2182 */
2183static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2184			    bool lazy, unsigned long flags)
2185{
2186
2187	if (!rcu_is_nocb_cpu(rdp->cpu))
2188		return false;
2189	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
2190	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2191		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2192					 (unsigned long)rhp->func,
2193					 -atomic_long_read(&rdp->nocb_q_count_lazy),
2194					 -atomic_long_read(&rdp->nocb_q_count));
2195	else
2196		trace_rcu_callback(rdp->rsp->name, rhp,
2197				   -atomic_long_read(&rdp->nocb_q_count_lazy),
2198				   -atomic_long_read(&rdp->nocb_q_count));
2199	return true;
2200}
2201
2202/*
2203 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2204 * not a no-CBs CPU.
2205 */
2206static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2207						     struct rcu_data *rdp,
2208						     unsigned long flags)
2209{
2210	long ql = rsp->qlen;
2211	long qll = rsp->qlen_lazy;
2212
2213	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2214	if (!rcu_is_nocb_cpu(smp_processor_id()))
2215		return false;
2216	rsp->qlen = 0;
2217	rsp->qlen_lazy = 0;
2218
2219	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
2220	if (rsp->orphan_donelist != NULL) {
2221		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2222					rsp->orphan_donetail, ql, qll, flags);
2223		ql = qll = 0;
2224		rsp->orphan_donelist = NULL;
2225		rsp->orphan_donetail = &rsp->orphan_donelist;
2226	}
2227	if (rsp->orphan_nxtlist != NULL) {
2228		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2229					rsp->orphan_nxttail, ql, qll, flags);
2230		ql = qll = 0;
2231		rsp->orphan_nxtlist = NULL;
2232		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2233	}
2234	return true;
2235}
2236
2237/*
2238 * If necessary, kick off a new grace period, and either way wait
2239 * for a subsequent grace period to complete.
2240 */
2241static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2242{
2243	unsigned long c;
2244	bool d;
2245	unsigned long flags;
2246	struct rcu_node *rnp = rdp->mynode;
2247
2248	raw_spin_lock_irqsave(&rnp->lock, flags);
2249	smp_mb__after_unlock_lock();
2250	c = rcu_start_future_gp(rnp, rdp);
2251	raw_spin_unlock_irqrestore(&rnp->lock, flags);
2252
2253	/*
2254	 * Wait for the grace period.  Do so interruptibly to avoid messing
2255	 * up the load average.
2256	 */
2257	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2258	for (;;) {
2259		wait_event_interruptible(
2260			rnp->nocb_gp_wq[c & 0x1],
2261			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2262		if (likely(d))
2263			break;
2264		flush_signals(current);
2265		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
2266	}
2267	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
2268	smp_mb(); /* Ensure that CB invocation happens after GP end. */
2269}
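/*
 * Note that c & 0x1 selects the same parity-indexed wait queue that
 * rcu_nocb_gp_cleanup() wakes once ->completed reaches c, so the
 * kthread sleeps on exactly the queue corresponding to the grace
 * period it requested.
 */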
2270
2271/*
2272 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2273 * callbacks queued by the corresponding no-CBs CPU.
2274 */
2275static int rcu_nocb_kthread(void *arg)
2276{
2277	int c, cl;
2278	bool firsttime = true;
2279	struct rcu_head *list;
2280	struct rcu_head *next;
2281	struct rcu_head **tail;
2282	struct rcu_data *rdp = arg;
2283
2284	/* Each pass through this loop invokes one batch of callbacks */
2285	for (;;) {
2286		/* If not polling, wait for next batch of callbacks. */
2287		if (!rcu_nocb_poll) {
2288			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2289					    TPS("Sleep"));
2290			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
2291			/* Memory barrier provided by xchg() below. */
2292		} else if (firsttime) {
2293			firsttime = false;
2294			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2295					    TPS("Poll"));
2296		}
2297		list = ACCESS_ONCE(rdp->nocb_head);
2298		if (!list) {
2299			if (!rcu_nocb_poll)
2300				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2301						    TPS("WokeEmpty"));
2302			schedule_timeout_interruptible(1);
2303			flush_signals(current);
2304			continue;
2305		}
2306		firsttime = true;
2307		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2308				    TPS("WokeNonEmpty"));
2309
2310		/*
2311		 * Extract queued callbacks, update counts, and wait
2312		 * for a grace period to elapse.
2313		 */
2314		ACCESS_ONCE(rdp->nocb_head) = NULL;
2315		tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2316		c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2317		cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2318		ACCESS_ONCE(rdp->nocb_p_count) += c;
2319		ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
2320		rcu_nocb_wait_gp(rdp);
2321
2322		/* Each pass through the following loop invokes a callback. */
2323		trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2324		c = cl = 0;
2325		while (list) {
2326			next = list->next;
2327			/* Wait for enqueuing to complete, if needed. */
2328			while (next == NULL && &list->next != tail) {
2329				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2330						    TPS("WaitQueue"));
2331				schedule_timeout_interruptible(1);
2332				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2333						    TPS("WokeQueue"));
2334				next = list->next;
2335			}
2336			debug_rcu_head_unqueue(list);
2337			local_bh_disable();
2338			if (__rcu_reclaim(rdp->rsp->name, list))
2339				cl++;
2340			c++;
2341			local_bh_enable();
2342			list = next;
2343		}
2344		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2345		ACCESS_ONCE(rdp->nocb_p_count) -= c;
2346		ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
2347		rdp->n_nocbs_invoked += c;
2348	}
2349	return 0;
2350}
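/*
 * Note on the extraction above: __call_rcu_nocb_enqueue() advances
 * ->nocb_tail with xchg() before filling in the ->next pointer of the
 * previous element, so this kthread can observe a NULL ->next short of
 * the tail and must therefore wait for the enqueue to complete before
 * invoking that callback.
 */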
2351
2352/* Is a deferred wakeup of rcu_nocb_kthread() required? */
2353static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2354{
2355	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
2356}
2357
2358/* Do a deferred wakeup of rcu_nocb_kthread(). */
2359static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2360{
2361	if (!rcu_nocb_need_deferred_wakeup(rdp))
2362		return;
2363	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
2364	wake_up(&rdp->nocb_wq);
2365	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
2366}
2367
2368/* Initialize per-rcu_data variables for no-CBs CPUs. */
2369static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2370{
2371	rdp->nocb_tail = &rdp->nocb_head;
2372	init_waitqueue_head(&rdp->nocb_wq);
2373}
2374
2375/* Create a kthread for each RCU flavor for each no-CBs CPU. */
2376static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2377{
2378	int cpu;
2379	struct rcu_data *rdp;
2380	struct task_struct *t;
2381
2382	if (rcu_nocb_mask == NULL)
2383		return;
2384	for_each_cpu(cpu, rcu_nocb_mask) {
2385		rdp = per_cpu_ptr(rsp->rda, cpu);
2386		t = kthread_run(rcu_nocb_kthread, rdp,
2387				"rcuo%c/%d", rsp->abbr, cpu);
2388		BUG_ON(IS_ERR(t));
2389		ACCESS_ONCE(rdp->nocb_kthread) = t;
2390	}
2391}
2392
2393/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2394static bool init_nocb_callback_list(struct rcu_data *rdp)
2395{
2396	if (rcu_nocb_mask == NULL ||
2397	    !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
2398		return false;
2399	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2400	return true;
2401}
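/*
 * Note: the NULL ->nxttail[RCU_NEXT_TAIL] set above is the sentinel
 * that __call_rcu() checks in order to divert this CPU's callbacks
 * onto the ->nocb_head list instead of the normal segmented callback
 * list.
 */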
2402
2403#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2404
2405static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2406{
2407	return 0;
2408}
2409
2410static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2411{
2412}
2413
2414static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2415{
2416}
2417
2418static void rcu_init_one_nocb(struct rcu_node *rnp)
2419{
2420}
2421
2422static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2423			    bool lazy, unsigned long flags)
2424{
2425	return false;
2426}
2427
2428static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2429						     struct rcu_data *rdp,
2430						     unsigned long flags)
2431{
2432	return false;
2433}
2434
2435static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2436{
2437}
2438
2439static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2440{
2441	return false;
2442}
2443
2444static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2445{
2446}
2447
2448static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2449{
2450}
2451
2452static bool init_nocb_callback_list(struct rcu_data *rdp)
2453{
2454	return false;
2455}
2456
2457#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2458
2459/*
2460 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2461 * arbitrarily long period of time with the scheduling-clock tick turned
2462 * off.  RCU will be paying attention to this CPU because it is in the
2463 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2464 * machine because the scheduling-clock tick has been disabled.  Therefore,
2465 * if an adaptive-ticks CPU is failing to respond to the current grace
2466 * period and has not been idle from an RCU perspective, kick it.
2467 */
2468static void rcu_kick_nohz_cpu(int cpu)
2469{
2470#ifdef CONFIG_NO_HZ_FULL
2471	if (tick_nohz_full_cpu(cpu))
2472		smp_send_reschedule(cpu);
2473#endif /* #ifdef CONFIG_NO_HZ_FULL */
2474}
2475
2476
2477#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
2478
2479/*
2480 * Define RCU flavor that holds sysidle state.  This needs to be the
2481 * most active flavor of RCU.
2482 */
2483#ifdef CONFIG_PREEMPT_RCU
2484static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
2485#else /* #ifdef CONFIG_PREEMPT_RCU */
2486static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
2487#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
2488
2489static int full_sysidle_state;		/* Current system-idle state. */
2490#define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
2491#define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
2492#define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
2493#define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
2494#define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */
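/*
 * The resulting state machine: scans finding all non-timekeeping CPUs
 * idle advance the state NOT -> SHORT -> LONG -> FULL, and
 * rcu_sys_is_idle() then moves FULL -> FULL_NOTED.  A CPU exiting
 * idle resets the state to NOT via rcu_sysidle_force_exit(), as does
 * a scan that finds a non-idle CPU via rcu_sysidle_cancel().
 */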
2495
2496/*
2497 * Invoked to note exit from irq or task transition to idle.  Note that
2498 * usermode execution does -not- count as idle here!  After all, we want
2499 * to detect full-system idle states, not RCU quiescent states and grace
2500 * periods.  The caller must have disabled interrupts.
2501 */
2502static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
2503{
2504	unsigned long j;
2505
2506	/* Adjust nesting, check for fully idle. */
2507	if (irq) {
2508		rdtp->dynticks_idle_nesting--;
2509		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2510		if (rdtp->dynticks_idle_nesting != 0)
2511			return;  /* Still not fully idle. */
2512	} else {
2513		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
2514		    DYNTICK_TASK_NEST_VALUE) {
2515			rdtp->dynticks_idle_nesting = 0;
2516		} else {
2517			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
2518			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2519			return;  /* Still not fully idle. */
2520		}
2521	}
2522
2523	/* Record start of fully idle period. */
2524	j = jiffies;
2525	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
2526	smp_mb__before_atomic_inc();
2527	atomic_inc(&rdtp->dynticks_idle);
2528	smp_mb__after_atomic_inc();
2529	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
2530}
2531
2532/*
2533 * Unconditionally force exit from full system-idle state.  This is
2534 * invoked when a normal CPU exits idle, but must be called separately
2535 * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
2536 * is that the timekeeping CPU is permitted to take scheduling-clock
2537 * interrupts while the system is in system-idle state, and of course
2538 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
2539 * interrupt from any other type of interrupt.
2540 */
2541void rcu_sysidle_force_exit(void)
2542{
2543	int oldstate = ACCESS_ONCE(full_sysidle_state);
2544	int newoldstate;
2545
2546	/*
2547	 * Each pass through the following loop attempts to exit full
2548	 * system-idle state.  If contention proves to be a problem,
2549	 * a trylock-based contention tree could be used here.
2550	 */
2551	while (oldstate > RCU_SYSIDLE_SHORT) {
2552		newoldstate = cmpxchg(&full_sysidle_state,
2553				      oldstate, RCU_SYSIDLE_NOT);
2554		if (oldstate == newoldstate &&
2555		    oldstate == RCU_SYSIDLE_FULL_NOTED) {
2556			rcu_kick_nohz_cpu(tick_do_timer_cpu);
2557			return; /* We cleared it, done! */
2558		}
2559		oldstate = newoldstate;
2560	}
2561	smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
2562}
2563
2564/*
2565 * Invoked to note entry to irq or task transition from idle.  Note that
2566 * usermode execution does -not- count as idle here!  The caller must
2567 * have disabled interrupts.
2568 */
2569static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
2570{
2571	/* Adjust nesting, check for already non-idle. */
2572	if (irq) {
2573		rdtp->dynticks_idle_nesting++;
2574		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2575		if (rdtp->dynticks_idle_nesting != 1)
2576			return; /* Already non-idle. */
2577	} else {
2578		/*
2579		 * Allow for irq misnesting.  Yes, it really is possible
2580		 * to enter an irq handler then never leave it, and maybe
2581		 * also vice versa.  Handle both possibilities.
2582		 */
2583		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
2584			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
2585			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2586			return; /* Already non-idle. */
2587		} else {
2588			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
2589		}
2590	}
2591
2592	/* Record end of idle period. */
2593	smp_mb__before_atomic_inc();
2594	atomic_inc(&rdtp->dynticks_idle);
2595	smp_mb__after_atomic_inc();
2596	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
2597
2598	/*
2599	 * If we are the timekeeping CPU, we are permitted to be non-idle
2600	 * during a system-idle state.  This must be the case, because
2601	 * the timekeeping CPU has to take scheduling-clock interrupts
2602	 * during the time that the system is transitioning to full
2603	 * system-idle state.  This means that the timekeeping CPU must
2604	 * invoke rcu_sysidle_force_exit() directly if it does anything
2605	 * more than take a scheduling-clock interrupt.
2606	 */
2607	if (smp_processor_id() == tick_do_timer_cpu)
2608		return;
2609
2610	/* Update system-idle state: We are clearly no longer fully idle! */
2611	rcu_sysidle_force_exit();
2612}
2613
2614/*
2615 * Check to see if the current CPU is idle.  Note that usermode execution
2616 * does not count as idle.  The caller must have disabled interrupts.
2617 */
2618static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2619				  unsigned long *maxj)
2620{
2621	int cur;
2622	unsigned long j;
2623	struct rcu_dynticks *rdtp = rdp->dynticks;
2624
2625	/*
2626	 * If some other CPU has already reported non-idle, if this is
2627	 * not the flavor of RCU that tracks sysidle state, or if this
2628	 * is an offline or the timekeeping CPU, nothing to do.
2629	 */
2630	if (!*isidle || rdp->rsp != rcu_sysidle_state ||
2631	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
2632		return;
2633	if (rcu_gp_in_progress(rdp->rsp))
2634		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
2635
2636	/* Pick up current idle and NMI-nesting counter and check. */
2637	cur = atomic_read(&rdtp->dynticks_idle);
2638	if (cur & 0x1) {
2639		*isidle = false; /* We are not idle! */
2640		return;
2641	}
2642	smp_mb(); /* Read counters before timestamps. */
2643
2644	/* Pick up timestamps. */
2645	j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
2646	/* If this CPU entered idle more recently, update maxj timestamp. */
2647	if (ULONG_CMP_LT(*maxj, j))
2648		*maxj = j;
2649}
2650
2651/*
2652 * Is this the flavor of RCU that is handling full-system idle?
2653 */
2654static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2655{
2656	return rsp == rcu_sysidle_state;
2657}
2658
2659/*
2660 * Bind the grace-period kthread for the sysidle flavor of RCU to the
2661 * timekeeping CPU.
2662 */
2663static void rcu_bind_gp_kthread(void)
2664{
2665	int cpu = ACCESS_ONCE(tick_do_timer_cpu);
2666
2667	if (cpu < 0 || cpu >= nr_cpu_ids)
2668		return;
2669	if (raw_smp_processor_id() != cpu)
2670		set_cpus_allowed_ptr(current, cpumask_of(cpu));
2671}
2672
2673/*
2674 * Return a delay in jiffies based on the number of CPUs, rcu_node
2675 * leaf fanout, and jiffies tick rate.  The idea is to allow larger
2676 * systems more time to transition to full-idle state in order to
2677 * avoid the cache thrashing that would otherwise occur on the state variable.
2678 * Really small systems (less than a couple of tens of CPUs) should
2679 * instead use a single global atomically incremented counter, and later
2680 * versions of this will automatically reconfigure themselves accordingly.
2681 */
2682static unsigned long rcu_sysidle_delay(void)
2683{
2684	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2685		return 0;
2686	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
2687}
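/*
 * A worked example: with nr_cpu_ids = 4096, HZ = 1000, and
 * rcu_fanout_leaf = 16, the delay is DIV_ROUND_UP(4096 * 1000,
 * 16 * 1000) = 256 jiffies, giving a larger system correspondingly
 * more time in each state and thus less cmpxchg traffic on
 * full_sysidle_state.
 */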
2688
2689/*
2690 * Advance the full-system-idle state.  This is invoked when all of
2691 * the non-timekeeping CPUs are idle.
2692 */
2693static void rcu_sysidle(unsigned long j)
2694{
2695	/* Check the current state. */
2696	switch (ACCESS_ONCE(full_sysidle_state)) {
2697	case RCU_SYSIDLE_NOT:
2698
2699		/* First time all are idle, so note a short idle period. */
2700		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
2701		break;
2702
2703	case RCU_SYSIDLE_SHORT:
2704
2705		/*
2706		 * Idle for a bit, time to advance to next state?
2707		 * cmpxchg failure means race with non-idle, let them win.
2708		 */
2709		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2710			(void)cmpxchg(&full_sysidle_state,
2711				      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
2712		break;
2713
2714	case RCU_SYSIDLE_LONG:
2715
2716		/*
2717		 * Do an additional check pass before advancing to full.
2718		 * cmpxchg failure means race with non-idle, let them win.
2719		 */
2720		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2721			(void)cmpxchg(&full_sysidle_state,
2722				      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
2723		break;
2724
2725	default:
2726		break;
2727	}
2728}
2729
2730/*
2731 * Found a non-idle non-timekeeping CPU, so kick the system-idle state
2732 * back to the beginning.
2733 */
2734static void rcu_sysidle_cancel(void)
2735{
2736	smp_mb();
2737	ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
2738}
2739
2740/*
2741 * Update the sysidle state based on the results of a force-quiescent-state
2742 * scan of the CPUs' dyntick-idle state.
2743 */
2744static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
2745			       unsigned long maxj, bool gpkt)
2746{
2747	if (rsp != rcu_sysidle_state)
2748		return;  /* Wrong flavor, ignore. */
2749	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2750		return;  /* Running state machine from timekeeping CPU. */
2751	if (isidle)
2752		rcu_sysidle(maxj);    /* More idle! */
2753	else
2754		rcu_sysidle_cancel(); /* Idle is over. */
2755}
2756
2757/*
2758 * Wrapper for rcu_sysidle_report() when called from the grace-period
2759 * kthread's context.
2760 */
2761static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
2762				  unsigned long maxj)
2763{
2764	rcu_sysidle_report(rsp, isidle, maxj, true);
2765}
2766
2767/* Callback and function for forcing an RCU grace period. */
2768struct rcu_sysidle_head {
2769	struct rcu_head rh;
2770	int inuse;
2771};
2772
2773static void rcu_sysidle_cb(struct rcu_head *rhp)
2774{
2775	struct rcu_sysidle_head *rshp;
2776
2777	/*
2778	 * The following memory barrier is needed to replace the
2779	 * memory barriers that would normally be in the memory
2780	 * allocator.
2781	 */
2782	smp_mb();  /* grace period precedes setting inuse. */
2783
2784	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
2785	ACCESS_ONCE(rshp->inuse) = 0;
2786}
2787
2788/*
2789 * Check to see if the system is fully idle, other than the timekeeping CPU.
2790 * The caller must have disabled interrupts.
2791 */
2792bool rcu_sys_is_idle(void)
2793{
2794	static struct rcu_sysidle_head rsh;
2795	int rss = ACCESS_ONCE(full_sysidle_state);
2796
2797	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
2798		return false;
2799
2800	/* Handle small-system case by doing a full scan of CPUs. */
2801	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
2802		int oldrss = rss - 1;
2803
2804		/*
2805		 * One pass to advance to each state up to _FULL.
2806		 * Give up if any pass fails to advance the state.
2807		 */
2808		while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
2809			int cpu;
2810			bool isidle = true;
2811			unsigned long maxj = jiffies - ULONG_MAX / 4;
2812			struct rcu_data *rdp;
2813
2814			/* Scan all the CPUs looking for nonidle CPUs. */
2815			for_each_possible_cpu(cpu) {
2816				rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
2817				rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
2818				if (!isidle)
2819					break;
2820			}
2821			rcu_sysidle_report(rcu_sysidle_state,
2822					   isidle, maxj, false);
2823			oldrss = rss;
2824			rss = ACCESS_ONCE(full_sysidle_state);
2825		}
2826	}
2827
2828	/* If this is the first observation of an idle period, record it. */
2829	if (rss == RCU_SYSIDLE_FULL) {
2830		rss = cmpxchg(&full_sysidle_state,
2831			      RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
2832		return rss == RCU_SYSIDLE_FULL;
2833	}
2834
2835	smp_mb(); /* ensure rss load happens before later caller actions. */
2836
2837	/* If already fully idle, tell the caller (in case of races). */
2838	if (rss == RCU_SYSIDLE_FULL_NOTED)
2839		return true;
2840
2841	/*
2842	 * If we aren't there yet, and a grace period is not in flight,
2843	 * initiate a grace period.  Either way, tell the caller that
2844	 * we are not there yet.  We use an xchg() rather than an assignment
2845	 * to make up for the memory barriers that would otherwise be
2846	 * provided by the memory allocator.
2847	 */
2848	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
2849	    !rcu_gp_in_progress(rcu_sysidle_state) &&
2850	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
2851		call_rcu(&rsh.rh, rcu_sysidle_cb);
2852	return false;
2853}
2854
2855/*
2856 * Initialize dynticks sysidle state for CPUs coming online.
2857 */
2858static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
2859{
2860	rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
2861}
2862
2863#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
2864
2865static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
2866{
2867}
2868
2869static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
2870{
2871}
2872
2873static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2874				  unsigned long *maxj)
2875{
2876}
2877
2878static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2879{
2880	return false;
2881}
2882
2883static void rcu_bind_gp_kthread(void)
2884{
2885}
2886
2887static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
2888				  unsigned long maxj)
2889{
2890}
2891
2892static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
2893{
2894}
2895
2896#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
2897
2898/*
2899 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
2900 * grace-period kthread will do force_quiescent_state() processing?
2901 * The idea is to avoid waking up RCU core processing on such a
2902 * CPU unless the grace period has extended for too long.
2903 *
2904 * This code relies on the fact that all NO_HZ_FULL CPUs are also
2905 * CONFIG_RCU_NOCB_CPU CPUs.
2906 */
2907static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
2908{
2909#ifdef CONFIG_NO_HZ_FULL
2910	if (tick_nohz_full_cpu(smp_processor_id()) &&
2911	    (!rcu_gp_in_progress(rsp) ||
2912	     ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
2913		return true;
2914#endif /* #ifdef CONFIG_NO_HZ_FULL */
2915	return false;
2916}
v4.17
  27#include <linux/delay.h>
  28#include <linux/gfp.h>
  29#include <linux/oom.h>
  30#include <linux/sched/debug.h>
  31#include <linux/smpboot.h>
  32#include <linux/sched/isolation.h>
  33#include <uapi/linux/sched/types.h>
  34#include "../time/tick-internal.h"
  35
  36#ifdef CONFIG_RCU_BOOST
  37
  38#include "../locking/rtmutex_common.h"
  39
  40/*
  41 * Control variables for per-CPU and per-rcu_node kthreads.  These
  42 * handle all flavors of RCU.
  43 */
  44static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
  45DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
  46DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
  47DEFINE_PER_CPU(char, rcu_cpu_has_work);
  48
  49#else /* #ifdef CONFIG_RCU_BOOST */
  50
  51/*
  52 * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
  53 * all uses are in dead code.  Provide a definition to keep the compiler
  54 * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
  55 * This probably needs to be excluded from -rt builds.
  56 */
  57#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
  58#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
  59
  60#endif /* #else #ifdef CONFIG_RCU_BOOST */
  61
  62#ifdef CONFIG_RCU_NOCB_CPU
  63static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
  64static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
  65#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
  66
  67/*
  68 * Check the RCU kernel configuration parameters and print informative
  69 * messages about anything out of the ordinary.
  70 */
  71static void __init rcu_bootup_announce_oddness(void)
  72{
  73	if (IS_ENABLED(CONFIG_RCU_TRACE))
  74		pr_info("\tRCU event tracing is enabled.\n");
  75	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
  76	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
  77		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
  78		       RCU_FANOUT);
  79	if (rcu_fanout_exact)
  80		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
  81	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
  82		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
  83	if (IS_ENABLED(CONFIG_PROVE_RCU))
  84		pr_info("\tRCU lockdep checking is enabled.\n");
  85	if (RCU_NUM_LVLS >= 4)
  86		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
  87	if (RCU_FANOUT_LEAF != 16)
  88		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
  89			RCU_FANOUT_LEAF);
  90	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
  91		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
  92	if (nr_cpu_ids != NR_CPUS)
  93		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
  94#ifdef CONFIG_RCU_BOOST
  95	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
  96#endif
  97	if (blimit != DEFAULT_RCU_BLIMIT)
  98		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
  99	if (qhimark != DEFAULT_RCU_QHIMARK)
 100		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
 101	if (qlowmark != DEFAULT_RCU_QLOMARK)
 102		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
 103	if (jiffies_till_first_fqs != ULONG_MAX)
 104		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
 105	if (jiffies_till_next_fqs != ULONG_MAX)
 106		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
 107	if (rcu_kick_kthreads)
 108		pr_info("\tKick kthreads if too-long grace period.\n");
 109	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
 110		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
 111	if (gp_preinit_delay)
 112		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
 113	if (gp_init_delay)
 114		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
 115	if (gp_cleanup_delay)
116		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
 117	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
 118		pr_info("\tRCU debug extended QS entry/exit.\n");
 119	rcupdate_announce_bootup_oddness();
 120}
 121
 122#ifdef CONFIG_PREEMPT_RCU
 123
 124RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 125static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
 126static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
 127
 128static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 129			       bool wake);
 130
 131/*
 132 * Tell them what RCU they are running.
 133 */
 134static void __init rcu_bootup_announce(void)
 135{
 136	pr_info("Preemptible hierarchical RCU implementation.\n");
 137	rcu_bootup_announce_oddness();
 138}
 139
 140/* Flags for rcu_preempt_ctxt_queue() decision table. */
 141#define RCU_GP_TASKS	0x8
 142#define RCU_EXP_TASKS	0x4
 143#define RCU_GP_BLKD	0x2
 144#define RCU_EXP_BLKD	0x1
 145
 146/*
 147 * Queues a task preempted within an RCU-preempt read-side critical
 148 * section into the appropriate location within the ->blkd_tasks list,
 149 * depending on the states of any ongoing normal and expedited grace
 150 * periods.  The ->gp_tasks pointer indicates which element the normal
 151 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 152 * indicates which element the expedited grace period is waiting on (again,
 153 * NULL if none).  If a grace period is waiting on a given element in the
 154 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 155 * adding a task to the tail of the list blocks any grace period that is
 156 * already waiting on one of the elements.  In contrast, adding a task
 157 * to the head of the list won't block any grace period that is already
 158 * waiting on one of the elements.
 159 *
 160 * This queuing is imprecise, and can sometimes make an ongoing grace
 161 * period wait for a task that is not strictly speaking blocking it.
 162 * Given the choice, we needlessly block a normal grace period rather than
 163 * blocking an expedited grace period.
 164 *
 165 * Note that an endless sequence of expedited grace periods still cannot
 166 * indefinitely postpone a normal grace period.  Eventually, all of the
 167 * fixed number of preempted tasks blocking the normal grace period that are
 168 * not also blocking the expedited grace period will resume and complete
 169 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 170 * pointer will equal the ->exp_tasks pointer, at which point the end of
 171 * the corresponding expedited grace period will also be the end of the
 172 * normal grace period.
 173 */
 174static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
175	__releases(rnp->lock) /* But leaves interrupts disabled. */
 176{
 177	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
 178			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
 179			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
 180			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
 181	struct task_struct *t = current;
 182
 183	raw_lockdep_assert_held_rcu_node(rnp);
 184	WARN_ON_ONCE(rdp->mynode != rnp);
 185	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 186
 187	/*
 188	 * Decide where to queue the newly blocked task.  In theory,
 189	 * this could be an if-statement.  In practice, when I tried
 190	 * that, it was quite messy.
 191	 */
 192	switch (blkd_state) {
 193	case 0:
 194	case                RCU_EXP_TASKS:
 195	case                RCU_EXP_TASKS + RCU_GP_BLKD:
 196	case RCU_GP_TASKS:
 197	case RCU_GP_TASKS + RCU_EXP_TASKS:
 198
 199		/*
 200		 * Blocking neither GP, or first task blocking the normal
 201		 * GP but not blocking the already-waiting expedited GP.
 202		 * Queue at the head of the list to avoid unnecessarily
 203		 * blocking the already-waiting GPs.
 204		 */
 205		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
 206		break;
 207
 208	case                                              RCU_EXP_BLKD:
 209	case                                RCU_GP_BLKD:
 210	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
 211	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
 212	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
 213	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
 214
 215		/*
 216		 * First task arriving that blocks either GP, or first task
 217		 * arriving that blocks the expedited GP (with the normal
 218		 * GP already waiting), or a task arriving that blocks
 219		 * both GPs with both GPs already waiting.  Queue at the
 220		 * tail of the list to avoid any GP waiting on any of the
 221		 * already queued tasks that are not blocking it.
 222		 */
 223		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
 224		break;
 225
 226	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
 227	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
 228	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
 229
 230		/*
 231		 * Second or subsequent task blocking the expedited GP.
 232		 * The task either does not block the normal GP, or is the
 233		 * first task blocking the normal GP.  Queue just after
 234		 * the first task blocking the expedited GP.
 235		 */
 236		list_add(&t->rcu_node_entry, rnp->exp_tasks);
 237		break;
 238
 239	case RCU_GP_TASKS +                 RCU_GP_BLKD:
 240	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
 241
 242		/*
 243		 * Second or subsequent task blocking the normal GP.
 244		 * The task does not block the expedited GP. Queue just
 245		 * after the first task blocking the normal GP.
 246		 */
 247		list_add(&t->rcu_node_entry, rnp->gp_tasks);
 248		break;
 249
 250	default:
 251
 252		/* Yet another exercise in excessive paranoia. */
 253		WARN_ON_ONCE(1);
 254		break;
 255	}
 256
 257	/*
 258	 * We have now queued the task.  If it was the first one to
 259	 * block either grace period, update the ->gp_tasks and/or
 260	 * ->exp_tasks pointers, respectively, to reference the newly
 261	 * blocked tasks.
 262	 */
 263	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
 264		rnp->gp_tasks = &t->rcu_node_entry;
 265	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 266		rnp->exp_tasks = &t->rcu_node_entry;
 267	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
 268		     !(rnp->qsmask & rdp->grpmask));
 269	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
 270		     !(rnp->expmask & rdp->grpmask));
 271	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 272
 273	/*
 274	 * Report the quiescent state for the expedited GP.  This expedited
 275	 * GP should not be able to end until we report, so there should be
 276	 * no need to check for a subsequent expedited GP.  (Though we are
 277	 * still in a quiescent state in any case.)
 278	 */
 279	if (blkd_state & RCU_EXP_BLKD &&
 280	    t->rcu_read_unlock_special.b.exp_need_qs) {
 281		t->rcu_read_unlock_special.b.exp_need_qs = false;
 282		rcu_report_exp_rdp(rdp->rsp, rdp, true);
 283	} else {
 284		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
 285	}
 286}
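/*
 * A walk-through of one case above: if a normal GP is already waiting
 * on this rcu_node (RCU_GP_TASKS), no expedited GP is waiting, and the
 * newly preempted task blocks the normal GP (RCU_GP_BLKD), then
 * blkd_state is RCU_GP_TASKS + RCU_GP_BLKD and the task is queued just
 * after *rnp->gp_tasks, where the normal GP will wait on it without
 * any expedited GP being blocked.
 */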
 287
 288/*
 289 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 290 * that this just means that the task currently running on the CPU is
 291 * not in a quiescent state.  There might be any number of tasks blocked
 292 * while in an RCU read-side critical section.
 293 *
 294 * As with the other rcu_*_qs() functions, callers to this function
 295 * must disable preemption.
 296 */
 297static void rcu_preempt_qs(void)
 298{
 299	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
 300	if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
 301		trace_rcu_grace_period(TPS("rcu_preempt"),
 302				       __this_cpu_read(rcu_data_p->gpnum),
 303				       TPS("cpuqs"));
 304		__this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
 305		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
 306		current->rcu_read_unlock_special.b.need_qs = false;
 307	}
 308}
 309
 310/*
 311 * We have entered the scheduler, and the current task might soon be
 312 * context-switched away from.  If this task is in an RCU read-side
 313 * critical section, we will no longer be able to rely on the CPU to
 314 * record that fact, so we enqueue the task on the blkd_tasks list.
 315 * The task will dequeue itself when it exits the outermost enclosing
 316 * RCU read-side critical section.  Therefore, the current grace period
 317 * cannot be permitted to complete until the blkd_tasks list entries
 318 * predating the current grace period drain, in other words, until
 319 * rnp->gp_tasks becomes NULL.
 320 *
 321 * Caller must disable interrupts.
 322 */
 323static void rcu_preempt_note_context_switch(bool preempt)
 324{
 325	struct task_struct *t = current;
 326	struct rcu_data *rdp;
 327	struct rcu_node *rnp;
 328
 329	lockdep_assert_irqs_disabled();
 330	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
 331	if (t->rcu_read_lock_nesting > 0 &&
 332	    !t->rcu_read_unlock_special.b.blocked) {
 333
 334		/* Possibly blocking in an RCU read-side critical section. */
 335		rdp = this_cpu_ptr(rcu_state_p->rda);
 336		rnp = rdp->mynode;
 337		raw_spin_lock_rcu_node(rnp);
 338		t->rcu_read_unlock_special.b.blocked = true;
 339		t->rcu_blocked_node = rnp;
 340
 341		/*
 342		 * Verify the CPU's sanity, trace the preemption, and
 343		 * then queue the task as required based on the states
 344		 * of any ongoing and expedited grace periods.
 345		 */
 346		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
 347		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
 348		trace_rcu_preempt_task(rdp->rsp->name,
 349				       t->pid,
 350				       (rnp->qsmask & rdp->grpmask)
 351				       ? rnp->gpnum
 352				       : rnp->gpnum + 1);
 353		rcu_preempt_ctxt_queue(rnp, rdp);
 354	} else if (t->rcu_read_lock_nesting < 0 &&
 355		   t->rcu_read_unlock_special.s) {
 356
 357		/*
 358		 * Complete exit from RCU read-side critical section on
 359		 * behalf of preempted instance of __rcu_read_unlock().
 360		 */
 361		rcu_read_unlock_special(t);
 362	}
 363
 364	/*
 365	 * Either we were not in an RCU read-side critical section to
 366	 * begin with, or we have now recorded that critical section
 367	 * globally.  Either way, we can now note a quiescent state
 368	 * for this CPU.  Again, if we were in an RCU read-side critical
 369	 * section, and if that critical section was blocking the current
 370	 * grace period, then the fact that the task has been enqueued
 371	 * means that we continue to block the current grace period.
 372	 */
 373	rcu_preempt_qs();
 374}
 375
 376/*
 377 * Check for preempted RCU readers blocking the current grace period
 378 * for the specified rcu_node structure.  If the caller needs a reliable
 379 * answer, it must hold the rcu_node's ->lock.
 380 */
 381static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 382{
 383	return rnp->gp_tasks != NULL;
 384}
 385
 386/*
 387 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 388 * NULL instead if already at the end of the list.
 389 */
 390static struct list_head *rcu_next_node_entry(struct task_struct *t,
 391					     struct rcu_node *rnp)
 392{
 393	struct list_head *np;
 394
 395	np = t->rcu_node_entry.next;
 396	if (np == &rnp->blkd_tasks)
 397		np = NULL;
 398	return np;
 399}
 400
 401/*
 402 * Return true if the specified rcu_node structure has tasks that were
 403 * preempted within an RCU read-side critical section.
 404 */
 405static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
 406{
 407	return !list_empty(&rnp->blkd_tasks);
 408}
 409
 410/*
 411 * Handle special cases during rcu_read_unlock(), such as needing to
 412 * notify RCU core processing or task having blocked during the RCU
 413 * read-side critical section.
 414 */
 415void rcu_read_unlock_special(struct task_struct *t)
 416{
 417	bool empty_exp;
 418	bool empty_norm;
 419	bool empty_exp_now;
 420	unsigned long flags;
 421	struct list_head *np;
 422	bool drop_boost_mutex = false;
 423	struct rcu_data *rdp;
 424	struct rcu_node *rnp;
 425	union rcu_special special;
 426
 427	/* NMI handlers cannot block and cannot safely manipulate state. */
 428	if (in_nmi())
 429		return;
 430
 431	local_irq_save(flags);
 432
 433	/*
 434	 * If RCU core is waiting for this CPU to exit its critical section,
 435	 * report the fact that it has exited.  Because irqs are disabled,
 436	 * t->rcu_read_unlock_special cannot change.
 437	 */
 438	special = t->rcu_read_unlock_special;
 439	if (special.b.need_qs) {
 440		rcu_preempt_qs();
 441		t->rcu_read_unlock_special.b.need_qs = false;
 442		if (!t->rcu_read_unlock_special.s) {
 443			local_irq_restore(flags);
 444			return;
 445		}
 446	}
 447
 448	/*
 449	 * Respond to a request for an expedited grace period, but only if
 450	 * we were not preempted, meaning that we were running on the same
 451	 * CPU throughout.  If we were preempted, the exp_need_qs flag
 452	 * would have been cleared at the time of the first preemption,
 453	 * and the quiescent state would be reported when we were dequeued.
 454	 */
 455	if (special.b.exp_need_qs) {
 456		WARN_ON_ONCE(special.b.blocked);
 457		t->rcu_read_unlock_special.b.exp_need_qs = false;
 458		rdp = this_cpu_ptr(rcu_state_p->rda);
 459		rcu_report_exp_rdp(rcu_state_p, rdp, true);
 460		if (!t->rcu_read_unlock_special.s) {
 461			local_irq_restore(flags);
 462			return;
 463		}
 464	}
 465
 466	/* Hardware IRQ handlers cannot block; complain if they get here. */
 467	if (in_irq() || in_serving_softirq()) {
 468		lockdep_rcu_suspicious(__FILE__, __LINE__,
 469				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
 470		pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
 471			 t->rcu_read_unlock_special.s,
 472			 t->rcu_read_unlock_special.b.blocked,
 473			 t->rcu_read_unlock_special.b.exp_need_qs,
 474			 t->rcu_read_unlock_special.b.need_qs);
 475		local_irq_restore(flags);
 476		return;
 477	}
 478
 479	/* Clean up if blocked during RCU read-side critical section. */
 480	if (special.b.blocked) {
 481		t->rcu_read_unlock_special.b.blocked = false;
 482
 483		/*
 484		 * Remove this task from the list it blocked on.  The task
 485		 * now remains queued on the rcu_node corresponding to the
 486		 * CPU it first blocked on, so there is no longer any need
 487		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
 488		 */
 489		rnp = t->rcu_blocked_node;
 490		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 491		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
 492		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 493		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 494		empty_exp = sync_rcu_preempt_exp_done(rnp);
 495		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 496		np = rcu_next_node_entry(t, rnp);
 497		list_del_init(&t->rcu_node_entry);
 498		t->rcu_blocked_node = NULL;
 499		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
 500						rnp->gpnum, t->pid);
 501		if (&t->rcu_node_entry == rnp->gp_tasks)
 502			rnp->gp_tasks = np;
 503		if (&t->rcu_node_entry == rnp->exp_tasks)
 504			rnp->exp_tasks = np;
 505		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
 506			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
 507			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 508			if (&t->rcu_node_entry == rnp->boost_tasks)
 509				rnp->boost_tasks = np;
 510		}
 511
 512		/*
 513		 * If this was the last task on the current list, and if
 514		 * we aren't waiting on any CPUs, report the quiescent state.
 515		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
 516		 * so we must take a snapshot of the expedited state.
 517		 */
 518		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
 519		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
 520			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 521							 rnp->gpnum,
 522							 0, rnp->qsmask,
 523							 rnp->level,
 524							 rnp->grplo,
 525							 rnp->grphi,
 526							 !!rnp->gp_tasks);
 527			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
 528		} else {
 529			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 530		}
 531
 532		/* Unboost if we were boosted. */
 533		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
 534			rt_mutex_futex_unlock(&rnp->boost_mtx);
 535
 536		/*
 537		 * If this was the last task on the expedited lists,
 538		 * then we need to report up the rcu_node hierarchy.
 539		 */
 540		if (!empty_exp && empty_exp_now)
 541			rcu_report_exp_rnp(rcu_state_p, rnp, true);
 542	} else {
 543		local_irq_restore(flags);
 544	}
 545}
 546
 547/*
 548 * Dump detailed information for all tasks blocking the current RCU
 549 * grace period on the specified rcu_node structure.
 550 */
 551static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 552{
 553	unsigned long flags;
 554	struct task_struct *t;
 555
 556	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 557	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 558		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 559		return;
 560	}
 561	t = list_entry(rnp->gp_tasks->prev,
 562		       struct task_struct, rcu_node_entry);
 563	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 564		/*
 565		 * We could be printing a lot while holding a spinlock.
 566		 * Avoid triggering hard lockup.
 567		 */
 568		touch_nmi_watchdog();
 569		sched_show_task(t);
 570	}
 571	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 572}
 573
 574/*
 575 * Dump detailed information for all tasks blocking the current RCU
 576 * grace period.
 577 */
 578static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 579{
 580	struct rcu_node *rnp = rcu_get_root(rsp);
 581
 582	rcu_print_detail_task_stall_rnp(rnp);
 583	rcu_for_each_leaf_node(rsp, rnp)
 584		rcu_print_detail_task_stall_rnp(rnp);
 585}
 586
 587static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 588{
 589	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
 590	       rnp->level, rnp->grplo, rnp->grphi);
 591}
 592
 593static void rcu_print_task_stall_end(void)
 594{
 595	pr_cont("\n");
 596}
 597
 598/*
 599 * Scan the current list of tasks blocked within RCU read-side critical
 600 * sections, printing out the tid of each.
 601 */
 602static int rcu_print_task_stall(struct rcu_node *rnp)
 603{
 604	struct task_struct *t;
 605	int ndetected = 0;
 606
 607	if (!rcu_preempt_blocked_readers_cgp(rnp))
 608		return 0;
 609	rcu_print_task_stall_begin(rnp);
 610	t = list_entry(rnp->gp_tasks->prev,
 611		       struct task_struct, rcu_node_entry);
 612	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 613		pr_cont(" P%d", t->pid);
 614		ndetected++;
 615	}
 616	rcu_print_task_stall_end();
 617	return ndetected;
 618}
 619
 620/*
 621 * Scan the current list of tasks blocked within RCU read-side critical
 622 * sections, printing out the tid of each that is blocking the current
 623 * expedited grace period.
 624 */
 625static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 626{
 627	struct task_struct *t;
 628	int ndetected = 0;
 629
 630	if (!rnp->exp_tasks)
 631		return 0;
 632	t = list_entry(rnp->exp_tasks->prev,
 633		       struct task_struct, rcu_node_entry);
 634	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 635		pr_cont(" P%d", t->pid);
 636		ndetected++;
 637	}
 638	return ndetected;
 639}
 640
 641/*
 642 * Check that the list of blocked tasks for the newly completed grace
 643 * period is in fact empty.  It is a serious bug to complete a grace
 644 * period that still has RCU readers blocked!  This function must be
 645 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 646 * must be held by the caller.
 647 *
 648 * Also, if there are blocked tasks on the list, they automatically
 649 * block the newly created grace period, so set up ->gp_tasks accordingly.
 650 */
 651static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 652{
 653	struct task_struct *t;
 654
 655	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
 656	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 657	if (rcu_preempt_has_tasks(rnp)) {
 658		rnp->gp_tasks = rnp->blkd_tasks.next;
 659		t = container_of(rnp->gp_tasks, struct task_struct,
 660				 rcu_node_entry);
 661		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
 662						rnp->gpnum, t->pid);
 663	}
 664	WARN_ON_ONCE(rnp->qsmask);
 665}
 666
 667/*
 668 * Check for a quiescent state from the current CPU.  When a task blocks,
 669 * the task is recorded in the corresponding CPU's rcu_node structure,
 670 * which is checked elsewhere.
 671 *
 672 * Caller must disable hard irqs.
 673 */
 674static void rcu_preempt_check_callbacks(void)
 675{
 676	struct task_struct *t = current;
 677
 678	if (t->rcu_read_lock_nesting == 0) {
 679		rcu_preempt_qs();
 680		return;
 681	}
 682	if (t->rcu_read_lock_nesting > 0 &&
 683	    __this_cpu_read(rcu_data_p->core_needs_qs) &&
 684	    __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
 685		t->rcu_read_unlock_special.b.need_qs = true;
 686}
 687
 688#ifdef CONFIG_RCU_BOOST
 689
 690static void rcu_preempt_do_callbacks(void)
 691{
 692	rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
 693}
 694
 695#endif /* #ifdef CONFIG_RCU_BOOST */
 696
 697/**
 698 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 699 * @head: structure to be used for queueing the RCU updates.
 700 * @func: actual callback function to be invoked after the grace period
 701 *
 702 * The callback function will be invoked some time after a full grace
 703 * period elapses, in other words after all pre-existing RCU read-side
 704 * critical sections have completed.  However, the callback function
 705 * might well execute concurrently with RCU read-side critical sections
 706 * that started after call_rcu() was invoked.  RCU read-side critical
 707 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 708 * and may be nested.
 709 *
 710 * Note that all CPUs must agree that the grace period extended beyond
 711 * all pre-existing RCU read-side critical sections.  On systems with more
 712 * than one CPU, this means that when "func()" is invoked, each CPU is
 713 * guaranteed to have executed a full memory barrier since the end of its
 714 * last RCU read-side critical section whose beginning preceded the call
 715 * to call_rcu().  It also means that each CPU executing an RCU read-side
 716 * critical section that continues beyond the start of "func()" must have
 717 * executed a memory barrier after the call_rcu() but before the beginning
 718 * of that RCU read-side critical section.  Note that these guarantees
 719 * include CPUs that are offline, idle, or executing in user mode, as
 720 * well as CPUs that are executing in the kernel.
 721 *
 722 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 723 * resulting RCU callback function "func()", then both CPU A and CPU B are
 724 * guaranteed to execute a full memory barrier during the time interval
 725 * between the call to call_rcu() and the invocation of "func()" -- even
 726 * if CPU A and CPU B are the same CPU (but again only if the system has
 727 * more than one CPU).
 728 */
 729void call_rcu(struct rcu_head *head, rcu_callback_t func)
 730{
 731	__call_rcu(head, func, rcu_state_p, -1, 0);
 732}
 733EXPORT_SYMBOL_GPL(call_rcu);
 734
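/*
 * Editor's usage sketch (not part of the original file): a typical
 * call_rcu() caller embeds an rcu_head in its own structure and frees
 * the enclosing object from the callback once a grace period has
 * elapsed.  The names below (struct foo, foo_release(), foo_delete())
 * are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_release(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_delete(struct foo *fp)
 *	{
 *		// First unlink fp from all RCU-protected structures, then:
 *		call_rcu(&fp->rh, foo_release);
 *	}
 */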
 735/**
 736 * synchronize_rcu - wait until a grace period has elapsed.
 737 *
 738 * Control will return to the caller some time after a full grace
 739 * period has elapsed, in other words after all currently executing RCU
 740 * read-side critical sections have completed.  Note, however, that
 741 * upon return from synchronize_rcu(), the caller might well be executing
 742 * concurrently with new RCU read-side critical sections that began while
 743 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 744 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 745 *
 746 * See the description of synchronize_sched() for more detailed
 747 * information on memory-ordering guarantees.  However, please note
 748 * that -only- the memory-ordering guarantees apply.  For example,
 749 * synchronize_rcu() is -not- guaranteed to wait on things like code
 750 * protected by preempt_disable(), instead, synchronize_rcu() is -only-
 751 * guaranteed to wait on RCU read-side critical sections, that is, sections
 752 * of code protected by rcu_read_lock().
 753 */
 754void synchronize_rcu(void)
 755{
 756	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 757			 lock_is_held(&rcu_lock_map) ||
 758			 lock_is_held(&rcu_sched_lock_map),
 759			 "Illegal synchronize_rcu() in RCU read-side critical section");
 760	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 761		return;
 762	if (rcu_gp_is_expedited())
 763		synchronize_rcu_expedited();
 764	else
 765		wait_rcu_gp(call_rcu);
 766}
 767EXPORT_SYMBOL_GPL(synchronize_rcu);
 768
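/*
 * Editor's usage sketch (not part of the original file): the classic
 * update-side pattern, in which the updater unlinks an element from an
 * RCU-protected list, waits out all pre-existing readers, and only then
 * frees the memory.  The names (struct foo, foo_lock, ->list) are
 * hypothetical.
 *
 *	static void foo_del_and_free(struct foo *fp)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&fp->list);  // new readers cannot find fp
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();        // wait for pre-existing readers
 *		kfree(fp);                // now safe to free
 *	}
 */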
 769/**
 770 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 771 *
 772 * Note that this primitive does not necessarily wait for an RCU grace period
 773 * to complete.  For example, if there are no RCU callbacks queued anywhere
 774 * in the system, then rcu_barrier() is within its rights to return
 775 * immediately, without waiting for anything, much less an RCU grace period.
 776 */
 777void rcu_barrier(void)
 778{
 779	_rcu_barrier(rcu_state_p);
 780}
 781EXPORT_SYMBOL_GPL(rcu_barrier);
 782
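/*
 * Editor's usage sketch (not part of the original file): a module that
 * posts callbacks with call_rcu() must not be unloaded while callbacks
 * referencing its code or data are still in flight, so a module-exit
 * handler typically looks like the hypothetical sequence below.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// First stop posting new callbacks, then:
 *		rcu_barrier();  // wait for all queued callbacks to run
 *		kmem_cache_destroy(foo_cache);  // now safe to tear down
 *	}
 */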
 783/*
 784 * Initialize preemptible RCU's state structures.
 785 */
 786static void __init __rcu_init_preempt(void)
 787{
 788	rcu_init_one(rcu_state_p);
 789}
 790
 791/*
 792 * Check for a task exiting while in a preemptible-RCU read-side
 793 * critical section, clean up if so.  No need to issue warnings,
 794 * as debug_check_no_locks_held() already does this if lockdep
 795 * is enabled.
 796 */
 797void exit_rcu(void)
 798{
 799	struct task_struct *t = current;
 800
 801	if (likely(list_empty(&current->rcu_node_entry)))
 802		return;
 803	t->rcu_read_lock_nesting = 1;
 804	barrier();
 805	t->rcu_read_unlock_special.b.blocked = true;
 806	__rcu_read_unlock();
 807}
 808
 809#else /* #ifdef CONFIG_PREEMPT_RCU */
 810
 811static struct rcu_state *const rcu_state_p = &rcu_sched_state;
 812
 813/*
 814 * Tell them what RCU they are running.
 815 */
 816static void __init rcu_bootup_announce(void)
 817{
 818	pr_info("Hierarchical RCU implementation.\n");
 819	rcu_bootup_announce_oddness();
 820}
 821
 822/*
 823 * Because preemptible RCU does not exist, we never have to check for
 824 * CPUs being in quiescent states.
 825 */
 826static void rcu_preempt_note_context_switch(bool preempt)
 827{
 828}
 829
 830/*
 831 * Because preemptible RCU does not exist, there are never any preempted
 832 * RCU readers.
 833 */
 834static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 835{
 836	return 0;
 837}
 838
 839/*
 840 * Because there is no preemptible RCU, there can be no readers blocked.
 841 */
 842static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
 843{
 844	return false;
 845}
 846
 847/*
 848 * Because preemptible RCU does not exist, we never have to check for
 849 * tasks blocked within RCU read-side critical sections.
 850 */
 851static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 852{
 853}
 854
 855/*
 856 * Because preemptible RCU does not exist, we never have to check for
 857 * tasks blocked within RCU read-side critical sections.
 858 */
 859static int rcu_print_task_stall(struct rcu_node *rnp)
 860{
 861	return 0;
 862}
 863
 864/*
 865 * Because preemptible RCU does not exist, we never have to check for
 866 * tasks blocked within RCU read-side critical sections that are
 867 * blocking the current expedited grace period.
 868 */
 869static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 870{
 871	return 0;
 872}
 873
 874/*
 875 * Because there is no preemptible RCU, there can be no readers blocked,
 876 * so there is no need to check for blocked tasks.  So check only for
 877 * bogus qsmask values.
 878 */
 879static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 880{
 881	WARN_ON_ONCE(rnp->qsmask);
 882}
 883
 884/*
 885 * Because preemptible RCU does not exist, it never has any callbacks
 886 * to check.
 887 */
 888static void rcu_preempt_check_callbacks(void)
 889{
 890}
 891
 892/*
 893 * Because preemptible RCU does not exist, rcu_barrier() is just
 894 * another name for rcu_barrier_sched().
 895 */
 896void rcu_barrier(void)
 897{
 898	rcu_barrier_sched();
 899}
 900EXPORT_SYMBOL_GPL(rcu_barrier);
 901
 902/*
 903 * Because preemptible RCU does not exist, it need not be initialized.
 904 */
 905static void __init __rcu_init_preempt(void)
 906{
 907}
 908
 909/*
 910 * Because preemptible RCU does not exist, tasks cannot possibly exit
 911 * while in preemptible RCU read-side critical sections.
 912 */
 913void exit_rcu(void)
 914{
 915}
 916
 917#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 918
 919#ifdef CONFIG_RCU_BOOST
 920
 921static void rcu_wake_cond(struct task_struct *t, int status)
 922{
 923	/*
 924	 * If the thread is yielding, only wake it when this
 925	 * is invoked from idle
 926	 */
 927	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
 928		wake_up_process(t);
 929}
 930
 931/*
 932 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 933 * or ->boost_tasks, advancing the pointer to the next task in the
 934 * ->blkd_tasks list.
 935 *
 936 * Note that irqs must be enabled: boosting the task can block.
 937 * Returns 1 if there are more tasks needing to be boosted.
 938 */
 939static int rcu_boost(struct rcu_node *rnp)
 940{
 941	unsigned long flags;
 942	struct task_struct *t;
 943	struct list_head *tb;
 944
 945	if (READ_ONCE(rnp->exp_tasks) == NULL &&
 946	    READ_ONCE(rnp->boost_tasks) == NULL)
 947		return 0;  /* Nothing left to boost. */
 948
 949	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 950
 951	/*
 952	 * Recheck under the lock: all tasks in need of boosting
 953	 * might exit their RCU read-side critical sections on their own.
 954	 */
 955	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
 956		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 957		return 0;
 958	}
 959
 960	/*
 961	 * Preferentially boost tasks blocking expedited grace periods.
 962	 * This cannot starve the normal grace periods because a second
 963	 * expedited grace period must boost all blocked tasks, including
 964	 * those blocking the pre-existing normal grace period.
 965	 */
 966	if (rnp->exp_tasks != NULL)
 967		tb = rnp->exp_tasks;
 968	else
 969		tb = rnp->boost_tasks;
 970
 971	/*
 972	 * We boost task t by manufacturing an rt_mutex that appears to
 973	 * be held by task t.  We leave a pointer to that rt_mutex where
 974	 * task t can find it, and task t will release the mutex when it
 975	 * exits its outermost RCU read-side critical section.  Then
 976	 * simply acquiring this artificial rt_mutex will boost task
 977	 * t's priority.  (Thanks to tglx for suggesting this approach!)
 978	 *
 979	 * Note that task t must acquire rnp->lock to remove itself from
 980	 * the ->blkd_tasks list, which it will do from exit() if from
 981	 * nowhere else.  We therefore are guaranteed that task t will
 982	 * stay around at least until we drop rnp->lock.  Note that
 983	 * rnp->lock also resolves races between our priority boosting
 984	 * and task t's exiting its outermost RCU read-side critical
 985	 * section.
 986	 */
 987	t = container_of(tb, struct task_struct, rcu_node_entry);
 988	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
 989	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 990	/* Lock only for side effect: boosts task t's priority. */
 991	rt_mutex_lock(&rnp->boost_mtx);
 992	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 993
 994	return READ_ONCE(rnp->exp_tasks) != NULL ||
 995	       READ_ONCE(rnp->boost_tasks) != NULL;
 996}
 997
 998/*
 999 * Priority-boosting kthread, one per leaf rcu_node.
1000 */
1001static int rcu_boost_kthread(void *arg)
1002{
1003	struct rcu_node *rnp = (struct rcu_node *)arg;
1004	int spincnt = 0;
1005	int more2boost;
1006
1007	trace_rcu_utilization(TPS("Start boost kthread@init"));
1008	for (;;) {
1009		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1010		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1011		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1012		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1013		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1014		more2boost = rcu_boost(rnp);
1015		if (more2boost)
1016			spincnt++;
1017		else
1018			spincnt = 0;
1019		if (spincnt > 10) {
1020			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1021			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1022			schedule_timeout_interruptible(2);
1023			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1024			spincnt = 0;
1025		}
1026	}
1027	/* NOTREACHED */
1028	trace_rcu_utilization(TPS("End boost kthread@notreached"));
1029	return 0;
1030}
1031
1032/*
1033 * Check to see if it is time to start boosting RCU readers that are
1034 * blocking the current grace period, and, if so, tell the per-rcu_node
1035 * kthread to start boosting them.  If there is an expedited grace
1036 * period in progress, it is always time to boost.
1037 *
1038 * The caller must hold rnp->lock, which this function releases.
1039 * The ->boost_kthread_task is immortal, so we don't need to worry
1040 * about it going away.
1041 */
1042static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1043	__releases(rnp->lock)
1044{
1045	struct task_struct *t;
1046
1047	raw_lockdep_assert_held_rcu_node(rnp);
1048	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1049		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1050		return;
1051	}
1052	if (rnp->exp_tasks != NULL ||
1053	    (rnp->gp_tasks != NULL &&
1054	     rnp->boost_tasks == NULL &&
1055	     rnp->qsmask == 0 &&
1056	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1057		if (rnp->exp_tasks == NULL)
1058			rnp->boost_tasks = rnp->gp_tasks;
1059		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1060		t = rnp->boost_kthread_task;
1061		if (t)
1062			rcu_wake_cond(t, rnp->boost_kthread_status);
1063	} else {
1064		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1065	}
1066}
1067
1068/*
1069 * Wake up the per-CPU kthread to invoke RCU callbacks.
1070 */
1071static void invoke_rcu_callbacks_kthread(void)
1072{
1073	unsigned long flags;
1074
1075	local_irq_save(flags);
1076	__this_cpu_write(rcu_cpu_has_work, 1);
1077	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1078	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
1079		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1080			      __this_cpu_read(rcu_cpu_kthread_status));
1081	}
1082	local_irq_restore(flags);
1083}
1084
1085/*
1086 * Is the current CPU running the RCU-callbacks kthread?
1087 * Caller must have preemption disabled.
1088 */
1089static bool rcu_is_callbacks_kthread(void)
1090{
1091	return __this_cpu_read(rcu_cpu_kthread_task) == current;
1092}
1093
1094#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1095
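/*
 * Editor's worked example (not in the original file): with the default
 * CONFIG_RCU_BOOST_DELAY=500 (milliseconds) and HZ=1000, the macro
 * above yields DIV_ROUND_UP(500 * 1000, 1000) = 500 jiffies; with
 * HZ=250 it instead yields DIV_ROUND_UP(500 * 250, 1000) = 125
 * jiffies.  Either way, boosting is not considered until roughly half
 * a second after the grace period begins.
 */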
1096/*
1097 * Do priority-boost accounting for the start of a new grace period.
1098 */
1099static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1100{
1101	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1102}
1103
1104/*
1105 * Create an RCU-boost kthread for the specified node if one does not
1106 * already exist.  We only create this kthread for preemptible RCU.
1107 * Returns zero if all is well, a negated errno otherwise.
1108 */
1109static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1110				       struct rcu_node *rnp)
1111{
1112	int rnp_index = rnp - &rsp->node[0];
1113	unsigned long flags;
1114	struct sched_param sp;
1115	struct task_struct *t;
1116
1117	if (rcu_state_p != rsp)
1118		return 0;
1119
1120	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
1121		return 0;
1122
1123	rsp->boost = 1;
1124	if (rnp->boost_kthread_task != NULL)
1125		return 0;
1126	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1127			   "rcub/%d", rnp_index);
1128	if (IS_ERR(t))
1129		return PTR_ERR(t);
1130	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1131	rnp->boost_kthread_task = t;
1132	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1133	sp.sched_priority = kthread_prio;
1134	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1135	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1136	return 0;
1137}
1138
1139static void rcu_kthread_do_work(void)
1140{
1141	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1142	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1143	rcu_preempt_do_callbacks();
1144}
1145
1146static void rcu_cpu_kthread_setup(unsigned int cpu)
1147{
1148	struct sched_param sp;
1149
1150	sp.sched_priority = kthread_prio;
1151	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1152}
1153
1154static void rcu_cpu_kthread_park(unsigned int cpu)
1155{
1156	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1157}
1158
1159static int rcu_cpu_kthread_should_run(unsigned int cpu)
1160{
1161	return __this_cpu_read(rcu_cpu_has_work);
1162}
1163
1164/*
1165 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1166 * RCU softirq used in flavors and configurations of RCU that do not
1167 * support RCU priority boosting.
1168 */
1169static void rcu_cpu_kthread(unsigned int cpu)
1170{
1171	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1172	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1173	int spincnt;
1174
1175	for (spincnt = 0; spincnt < 10; spincnt++) {
1176		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1177		local_bh_disable();
1178		*statusp = RCU_KTHREAD_RUNNING;
1179		this_cpu_inc(rcu_cpu_kthread_loops);
1180		local_irq_disable();
1181		work = *workp;
1182		*workp = 0;
1183		local_irq_enable();
1184		if (work)
1185			rcu_kthread_do_work();
1186		local_bh_enable();
1187		if (*workp == 0) {
1188			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1189			*statusp = RCU_KTHREAD_WAITING;
1190			return;
1191		}
1192	}
1193	*statusp = RCU_KTHREAD_YIELDING;
1194	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1195	schedule_timeout_interruptible(2);
1196	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1197	*statusp = RCU_KTHREAD_WAITING;
1198}
1199
1200/*
1201 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1202 * served by the rcu_node in question.  The CPU hotplug lock is still
1203 * held, so the value of rnp->qsmaskinit will be stable.
1204 *
1205 * We don't include outgoingcpu in the affinity set; use -1 if there is
1206 * no outgoing CPU.  If there are no CPUs left in the affinity set,
1207 * this function allows the kthread to execute on any CPU.
1208 */
1209static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1210{
1211	struct task_struct *t = rnp->boost_kthread_task;
1212	unsigned long mask = rcu_rnp_online_cpus(rnp);
1213	cpumask_var_t cm;
1214	int cpu;
1215
1216	if (!t)
1217		return;
1218	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1219		return;
1220	for_each_leaf_node_possible_cpu(rnp, cpu)
1221		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
1222		    cpu != outgoingcpu)
1223			cpumask_set_cpu(cpu, cm);
1224	if (cpumask_weight(cm) == 0)
1225		cpumask_setall(cm);
1226	set_cpus_allowed_ptr(t, cm);
1227	free_cpumask_var(cm);
1228}
1229
1230static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1231	.store			= &rcu_cpu_kthread_task,
1232	.thread_should_run	= rcu_cpu_kthread_should_run,
1233	.thread_fn		= rcu_cpu_kthread,
1234	.thread_comm		= "rcuc/%u",
1235	.setup			= rcu_cpu_kthread_setup,
1236	.park			= rcu_cpu_kthread_park,
1237};
1238
1239/*
1240 * Spawn boost kthreads -- called as soon as the scheduler is running.
1241 */
1242static void __init rcu_spawn_boost_kthreads(void)
1243{
1244	struct rcu_node *rnp;
1245	int cpu;
1246
1247	for_each_possible_cpu(cpu)
1248		per_cpu(rcu_cpu_has_work, cpu) = 0;
1249	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1250	rcu_for_each_leaf_node(rcu_state_p, rnp)
1251		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1252}
1253
1254static void rcu_prepare_kthreads(int cpu)
1255{
1256	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
1257	struct rcu_node *rnp = rdp->mynode;
1258
1259	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1260	if (rcu_scheduler_fully_active)
1261		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1262}
1263
1264#else /* #ifdef CONFIG_RCU_BOOST */
1265
1266static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1267	__releases(rnp->lock)
1268{
1269	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1270}
1271
1272static void invoke_rcu_callbacks_kthread(void)
1273{
1274	WARN_ON_ONCE(1);
1275}
1276
1277static bool rcu_is_callbacks_kthread(void)
1278{
1279	return false;
1280}
1281
1282static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1283{
1284}
1285
1286static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1287{
1288}
1289
1290static void __init rcu_spawn_boost_kthreads(void)
1291{
1292}
1293
1294static void rcu_prepare_kthreads(int cpu)
1295{
1296}
1297
1298#endif /* #else #ifdef CONFIG_RCU_BOOST */
1299
1300#if !defined(CONFIG_RCU_FAST_NO_HZ)
1301
1302/*
1303 * Check to see if any future RCU-related work will need to be done
1304 * by the current CPU, even if none need be done immediately, returning
1305 * 1 if so.  This function is part of the RCU implementation; it is -not-
1306 * an exported member of the RCU API.
1307 *
1308 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1309 * any flavor of RCU.
1310 */
1311int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1312{
1313	*nextevt = KTIME_MAX;
1314	return rcu_cpu_has_callbacks(NULL);
1315}
1316
1317/*
1318 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1319 * after it.
1320 */
1321static void rcu_cleanup_after_idle(void)
1322{
1323}
1324
1325/*
1326 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1327 * is nothing.
1328 */
1329static void rcu_prepare_for_idle(void)
1330{
1331}
1332
1333/*
1334 * Don't bother keeping a running count of the number of RCU callbacks
1335 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1336 */
1337static void rcu_idle_count_callbacks_posted(void)
1338{
1339}
1340
1341#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1342
1343/*
1344 * This code is invoked when a CPU goes idle, at which point we want
1345 * to have the CPU do everything required for RCU so that it can enter
1346 * the energy-efficient dyntick-idle mode.  This is handled by a
1347 * state machine implemented by rcu_prepare_for_idle() below.
1348 *
1349 * The following two preprocessor symbols control this state machine:
1350 *
1351 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1352 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
1353 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
1354 *	benchmarkers who might otherwise be tempted to set this to a large
1355 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1356 *	system.  And if you are -that- concerned about energy efficiency,
1357 *	just power the system down and be done with it!
1358 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1359 *	permitted to sleep in dyntick-idle mode with only lazy RCU
1360 *	callbacks pending.  Setting this too high can OOM your system.
1361 *
1362 * The values below work well in practice.  If future workloads require
1363 * adjustment, they can be converted into kernel config parameters, though
1364 * making the state machine smarter might be a better option.
1365 */
1366#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
1367#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
1368
1369static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1370module_param(rcu_idle_gp_delay, int, 0644);
1371static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1372module_param(rcu_idle_lazy_gp_delay, int, 0644);
1373
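/*
 * Editor's usage note (not in the original file): because this file is
 * built into the rcutree module, these delays can presumably be
 * overridden at boot time (for example "rcutree.rcu_idle_gp_delay=8")
 * or, given the 0644 permissions above, adjusted at run time under
 * /sys/module/rcutree/parameters/.
 */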
1374/*
1375 * Try to advance callbacks for all flavors of RCU on the current CPU, but
1376 * only if it has been a while since the last time we did so.  Afterwards,
1377 * if there are any callbacks ready for immediate invocation, return true.
1378 */
1379static bool __maybe_unused rcu_try_advance_all_cbs(void)
1380{
1381	bool cbs_ready = false;
1382	struct rcu_data *rdp;
1383	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1384	struct rcu_node *rnp;
1385	struct rcu_state *rsp;
1386
1387	/* Exit early if we advanced recently. */
1388	if (jiffies == rdtp->last_advance_all)
1389		return false;
1390	rdtp->last_advance_all = jiffies;
1391
1392	for_each_rcu_flavor(rsp) {
1393		rdp = this_cpu_ptr(rsp->rda);
1394		rnp = rdp->mynode;
1395
1396		/*
1397		 * Don't bother checking unless a grace period has
1398		 * completed since we last checked and there are
1399		 * callbacks not yet ready to invoke.
1400		 */
1401		if ((rdp->completed != rnp->completed ||
1402		     unlikely(READ_ONCE(rdp->gpwrap))) &&
1403		    rcu_segcblist_pend_cbs(&rdp->cblist))
1404			note_gp_changes(rsp, rdp);
1405
1406		if (rcu_segcblist_ready_cbs(&rdp->cblist))
1407			cbs_ready = true;
1408	}
1409	return cbs_ready;
1410}
1411
1412/*
1413 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1414 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1415 * caller to set the timeout based on whether or not there are non-lazy
1416 * callbacks.
1417 *
1418 * The caller must have disabled interrupts.
1419 */
1420int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1421{
1422	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1423	unsigned long dj;
1424
1425	lockdep_assert_irqs_disabled();
1426
1427	/* Snapshot to detect later posting of non-lazy callback. */
1428	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1429
1430	/* If no callbacks, RCU doesn't need the CPU. */
1431	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
1432		*nextevt = KTIME_MAX;
1433		return 0;
1434	}
1435
1436	/* Attempt to advance callbacks. */
1437	if (rcu_try_advance_all_cbs()) {
1438		/* Some ready to invoke, so initiate later invocation. */
1439		invoke_rcu_core();
1440		return 1;
1441	}
1442	rdtp->last_accelerate = jiffies;
1443
1444	/* Request timer delay depending on laziness, and round. */
1445	if (!rdtp->all_lazy) {
1446		dj = round_up(rcu_idle_gp_delay + jiffies,
1447			       rcu_idle_gp_delay) - jiffies;
1448	} else {
1449		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1450	}
1451	*nextevt = basemono + dj * TICK_NSEC;
1452	return 0;
1453}
1454
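/*
 * Editor's worked example (not in the original file): with the default
 * rcu_idle_gp_delay of 4 and jiffies == 1001, the non-lazy branch above
 * computes dj = round_up(4 + 1001, 4) - 1001 = 1008 - 1001 = 7, so the
 * requested wakeup lands on a jiffies value that is a multiple of 4;
 * rounding to common boundaries lets nearby CPUs batch their wakeups.
 */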
1455/*
1456 * Prepare a CPU for idle from an RCU perspective.  The first major task
1457 * is to sense whether nohz mode has been enabled or disabled via sysfs.
1458 * The second major task is to check to see if a non-lazy callback has
1459 * arrived at a CPU that previously had only lazy callbacks.  The third
1460 * major task is to accelerate (that is, assign grace-period numbers to)
1461 * any recently arrived callbacks.
1462 *
1463 * The caller must have disabled interrupts.
1464 */
1465static void rcu_prepare_for_idle(void)
1466{
1467	bool needwake;
1468	struct rcu_data *rdp;
1469	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1470	struct rcu_node *rnp;
1471	struct rcu_state *rsp;
1472	int tne;
1473
1474	lockdep_assert_irqs_disabled();
1475	if (rcu_is_nocb_cpu(smp_processor_id()))
1476		return;
1477
1478	/* Handle nohz enablement switches conservatively. */
1479	tne = READ_ONCE(tick_nohz_active);
1480	if (tne != rdtp->tick_nohz_enabled_snap) {
1481		if (rcu_cpu_has_callbacks(NULL))
1482			invoke_rcu_core(); /* force nohz to see update. */
1483		rdtp->tick_nohz_enabled_snap = tne;
1484		return;
1485	}
1486	if (!tne)
1487		return;
1488
1489	/*
1490	 * If a non-lazy callback arrived at a CPU having only lazy
1491	 * callbacks, invoke RCU core for the side-effect of recalculating
1492	 * idle duration on re-entry to idle.
1493	 */
1494	if (rdtp->all_lazy &&
1495	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1496		rdtp->all_lazy = false;
1497		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1498		invoke_rcu_core();
1499		return;
1500	}
1501
1502	/*
1503	 * If we have not yet accelerated this jiffy, accelerate all
1504	 * callbacks on this CPU.
1505	 */
1506	if (rdtp->last_accelerate == jiffies)
1507		return;
1508	rdtp->last_accelerate = jiffies;
1509	for_each_rcu_flavor(rsp) {
1510		rdp = this_cpu_ptr(rsp->rda);
1511		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1512			continue;
1513		rnp = rdp->mynode;
1514		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1515		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
1516		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1517		if (needwake)
1518			rcu_gp_kthread_wake(rsp);
1519	}
1520}
1521
1522/*
1523 * Clean up for exit from idle.  Attempt to advance callbacks based on
1524 * any grace periods that elapsed while the CPU was idle, and if any
1525 * callbacks are now ready to invoke, initiate invocation.
1526 */
1527static void rcu_cleanup_after_idle(void)
1528{
1529	lockdep_assert_irqs_disabled();
1530	if (rcu_is_nocb_cpu(smp_processor_id()))
1531		return;
1532	if (rcu_try_advance_all_cbs())
1533		invoke_rcu_core();
1534}
1535
1536/*
1537 * Keep a running count of the number of non-lazy callbacks posted
1538 * on this CPU.  This running counter (which is never decremented) allows
1539 * rcu_prepare_for_idle() to detect when something out of the idle loop
1540 * posts a callback, even if an equal number of callbacks are invoked.
1541 * Of course, callbacks should only be posted from within a trace event
1542 * designed to be called from idle or from within RCU_NONIDLE().
1543 */
1544static void rcu_idle_count_callbacks_posted(void)
1545{
1546	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1547}
1548
1549/*
1550 * Data for flushing lazy RCU callbacks at OOM time.
1551 */
1552static atomic_t oom_callback_count;
1553static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1554
1555/*
1556 * RCU OOM callback -- decrement the outstanding count and deliver the
1557 * wake-up if we are the last one.
1558 */
1559static void rcu_oom_callback(struct rcu_head *rhp)
1560{
1561	if (atomic_dec_and_test(&oom_callback_count))
1562		wake_up(&oom_callback_wq);
1563}
1564
1565/*
1566 * Post an rcu_oom_notify callback on the current CPU if it has at
1567 * least one lazy callback.  This will unnecessarily post callbacks
1568 * to CPUs that already have a non-lazy callback at the end of their
1569 * callback list, but this is an infrequent operation, so accept some
1570 * extra overhead to keep things simple.
1571 */
1572static void rcu_oom_notify_cpu(void *unused)
1573{
1574	struct rcu_state *rsp;
1575	struct rcu_data *rdp;
1576
1577	for_each_rcu_flavor(rsp) {
1578		rdp = raw_cpu_ptr(rsp->rda);
1579		if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) {
1580			atomic_inc(&oom_callback_count);
1581			rsp->call(&rdp->oom_head, rcu_oom_callback);
1582		}
1583	}
1584}
1585
1586/*
1587 * If low on memory, ensure that each CPU has a non-lazy callback.
1588 * This will wake up CPUs that have only lazy callbacks, in turn
1589 * ensuring that they free up the corresponding memory in a timely manner.
1590 * Because an uncertain amount of memory will be freed in some uncertain
1591 * timeframe, we do not claim to have freed anything.
1592 */
1593static int rcu_oom_notify(struct notifier_block *self,
1594			  unsigned long notused, void *nfreed)
1595{
1596	int cpu;
1597
1598	/* Wait for callbacks from earlier instance to complete. */
1599	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1600	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1601
1602	/*
1603	 * Prevent premature wakeup: ensure that all increments happen
1604	 * before there is a chance of the counter reaching zero.
1605	 */
1606	atomic_set(&oom_callback_count, 1);
1607
1608	for_each_online_cpu(cpu) {
1609		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1610		cond_resched_rcu_qs();
1611	}
1612
1613	/* Unconditionally decrement: no need to wake ourselves up. */
1614	atomic_dec(&oom_callback_count);
1615
1616	return NOTIFY_OK;
1617}
1618
1619static struct notifier_block rcu_oom_nb = {
1620	.notifier_call = rcu_oom_notify
1621};
1622
1623static int __init rcu_register_oom_notifier(void)
1624{
1625	register_oom_notifier(&rcu_oom_nb);
1626	return 0;
1627}
1628early_initcall(rcu_register_oom_notifier);
1629
1630#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1631
1632#ifdef CONFIG_RCU_FAST_NO_HZ
1633
1634static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1635{
1636	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1637	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1638
1639	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1640		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1641		ulong2long(nlpd),
1642		rdtp->all_lazy ? 'L' : '.',
1643		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1644}
1645
1646#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1647
1648static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1649{
1650	*cp = '\0';
1651}
1652
1653#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1654
1655/* Initiate the stall-info list. */
1656static void print_cpu_stall_info_begin(void)
1657{
1658	pr_cont("\n");
1659}
1660
1661/*
1662 * Print out diagnostic information for the specified stalled CPU.
1663 *
1664 * If the specified CPU is aware of the current RCU grace period
1665 * (flavor specified by rsp), then print the number of scheduling
1666 * clock interrupts the CPU has taken during the time that it has
1667 * been aware.  Otherwise, print the number of RCU grace periods
1668 * that this CPU is ignorant of, for example, "1" if the CPU was
1669 * aware of the previous grace period.
1670 *
1671 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1672 */
1673static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1674{
1675	unsigned long delta;
1676	char fast_no_hz[72];
1677	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1678	struct rcu_dynticks *rdtp = rdp->dynticks;
1679	char *ticks_title;
1680	unsigned long ticks_value;
1681
1682	/*
1683	 * We could be printing a lot while holding a spinlock.  Avoid
1684	 * triggering hard lockup.
1685	 */
1686	touch_nmi_watchdog();
1687
1688	if (rsp->gpnum == rdp->gpnum) {
1689		ticks_title = "ticks this GP";
1690		ticks_value = rdp->ticks_this_gp;
1691	} else {
1692		ticks_title = "GPs behind";
1693		ticks_value = rsp->gpnum - rdp->gpnum;
1694	}
1695	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1696	delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
1697	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
1698	       cpu,
1699	       "O."[!!cpu_online(cpu)],
1700	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
1701	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
1702	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
1703			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
1704				"!."[!delta],
1705	       ticks_value, ticks_title,
1706	       rcu_dynticks_snap(rdtp) & 0xfff,
1707	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1708	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1709	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1710	       fast_no_hz);
1711}
1712
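/*
 * Editor's illustrative decode (values invented, not in the original
 * file): given the format string above, a stall line such as
 *
 *	7-...!: (3 GPs behind) idle=874/0/0 softirq=42/42 fqs=0
 *
 * would describe CPU 7.  Each of the four flag characters prints as a
 * letter to flag an anomaly ('O' offline, 'o' absent from ->qsmaskinit,
 * 'N' absent from ->qsmaskinitnext) and as '.' when nominal; the '!' in
 * the fourth position indicates that the irq-work diagnostic has not
 * reported in during the current grace period, and "3 GPs behind" says
 * this CPU has not yet noticed the three most recent grace periods.
 */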
1713/* Terminate the stall-info list. */
1714static void print_cpu_stall_info_end(void)
1715{
1716	pr_err("\t");
1717}
1718
1719/* Zero ->ticks_this_gp for all flavors of RCU. */
1720static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1721{
1722	rdp->ticks_this_gp = 0;
1723	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1724}
1725
1726/* Increment ->ticks_this_gp for all flavors of RCU. */
1727static void increment_cpu_stall_ticks(void)
1728{
1729	struct rcu_state *rsp;
1730
1731	for_each_rcu_flavor(rsp)
1732		raw_cpu_inc(rsp->rda->ticks_this_gp);
1733}
1734
1735#ifdef CONFIG_RCU_NOCB_CPU
1736
1737/*
1738 * Offload callback processing from the boot-time-specified set of CPUs
1739 * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1740 * kthread created that pulls the callbacks from the corresponding CPU,
1741 * waits for a grace period to elapse, and invokes the callbacks.
1742 * The no-CBs CPUs do a wake_up() on their kthread when they insert
1743 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1744 * has been specified, in which case each kthread actively polls its
1745 * CPU.  (Which isn't so great for energy efficiency, but which does
1746 * reduce RCU's overhead on that CPU.)
1747 *
1748 * This is intended to be used in conjunction with Frederic Weisbecker's
1749 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1750 * running CPU-bound user-mode computations.
1751 *
1752 * Offloading of callback processing could also in theory be used as
1753 * an energy-efficiency measure because CPUs with no RCU callbacks
1754 * queued are more aggressive about entering dyntick-idle mode.
1755 */
1756
1757
1758/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1759static int __init rcu_nocb_setup(char *str)
1760{
1761	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1762	cpulist_parse(str, rcu_nocb_mask);
1763	return 1;
1764}
1765__setup("rcu_nocbs=", rcu_nocb_setup);
1766
1767static int __init parse_rcu_nocb_poll(char *arg)
1768{
1769	rcu_nocb_poll = true;
1770	return 0;
1771}
1772early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
1773
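/*
 * Editor's usage note (not in the original file): offloading is
 * requested on the kernel command line, for example "rcu_nocbs=1-7" to
 * offload callbacks from CPUs 1 through 7, optionally together with
 * "rcu_nocb_poll" to make the rcuo kthreads poll instead of waiting
 * for wakeups.
 */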
1774/*
1775 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1776 * grace period.
1777 */
1778static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1779{
1780	swake_up_all(sq);
1781}
1782
1783/*
1784 * Set the root rcu_node structure's ->need_future_gp field
1785 * based on the sum of those of all rcu_node structures.  This does
1786 * double-count the root rcu_node structure's requests, but this
1787 * is necessary to handle the possibility of a rcu_nocb_kthread()
1788 * having awakened during the time that the rcu_node structures
1789 * were being updated for the end of the previous grace period.
1790 */
1791static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
1792{
1793	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
1794}
1795
1796static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1797{
1798	return &rnp->nocb_gp_wq[rnp->completed & 0x1];
1799}
1800
1801static void rcu_init_one_nocb(struct rcu_node *rnp)
1802{
1803	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
1804	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
1805}
1806
1807/* Is the specified CPU a no-CBs CPU? */
1808bool rcu_is_nocb_cpu(int cpu)
1809{
1810	if (cpumask_available(rcu_nocb_mask))
1811		return cpumask_test_cpu(cpu, rcu_nocb_mask);
1812	return false;
1813}
1814
1815/*
1816 * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
1817 * and this function releases it.
1818 */
1819static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
1820			       unsigned long flags)
1821	__releases(rdp->nocb_lock)
1822{
1823	struct rcu_data *rdp_leader = rdp->nocb_leader;
1824
1825	lockdep_assert_held(&rdp->nocb_lock);
1826	if (!READ_ONCE(rdp_leader->nocb_kthread)) {
1827		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1828		return;
1829	}
1830	if (rdp_leader->nocb_leader_sleep || force) {
1831		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
1832		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
1833		del_timer(&rdp->nocb_timer);
1834		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1835		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
1836		swake_up(&rdp_leader->nocb_wq);
1837	} else {
1838		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1839	}
1840}
1841
1842/*
1843 * Kick the leader kthread for this NOCB group, but caller has not
1844 * acquired locks.
1845 */
1846static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1847{
1848	unsigned long flags;
1849
1850	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1851	__wake_nocb_leader(rdp, force, flags);
1852}
1853
1854/*
1855 * Arrange to wake the leader kthread for this NOCB group at some
1856 * future time when it is safe to do so.
1857 */
1858static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
1859				   const char *reason)
1860{
1861	unsigned long flags;
1862
1863	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1864	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
1865		mod_timer(&rdp->nocb_timer, jiffies + 1);
1866	WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
1867	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
1868	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1869}
1870
1871/*
1872 * Does the specified CPU need an RCU callback for the specified flavor
1873 * of rcu_barrier()?
1874 */
1875static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
1876{
1877	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1878	unsigned long ret;
1879#ifdef CONFIG_PROVE_RCU
1880	struct rcu_head *rhp;
1881#endif /* #ifdef CONFIG_PROVE_RCU */
1882
1883	/*
1884	 * Check count of all no-CBs callbacks awaiting invocation.
1885	 * There needs to be a barrier before this function is called,
1886	 * but associated with a prior determination that no more
1887	 * callbacks would be posted.  In the worst case, the first
1888	 * barrier in _rcu_barrier() suffices (but the caller cannot
1889 * necessarily rely on this; it is not a substitute for the caller
1890 * getting the concurrency design right!).  There must also be
1891 * a barrier between the following load and the posting of a callback
1892	 * (if a callback is in fact needed).  This is associated with an
1893	 * atomic_inc() in the caller.
1894	 */
1895	ret = atomic_long_read(&rdp->nocb_q_count);
1896
1897#ifdef CONFIG_PROVE_RCU
1898	rhp = READ_ONCE(rdp->nocb_head);
1899	if (!rhp)
1900		rhp = READ_ONCE(rdp->nocb_gp_head);
1901	if (!rhp)
1902		rhp = READ_ONCE(rdp->nocb_follower_head);
1903
1904	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
1905	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
1906	    rcu_scheduler_fully_active) {
1907		/* RCU callback enqueued before CPU first came online??? */
1908		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
1909		       cpu, rhp->func);
1910		WARN_ON_ONCE(1);
1911	}
1912#endif /* #ifdef CONFIG_PROVE_RCU */
1913
1914	return !!ret;
1915}
1916
1917/*
1918 * Enqueue the specified string of rcu_head structures onto the specified
1919 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
1920 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
1921 * counts are supplied by rhcount and rhcount_lazy.
1922 *
1923 * If warranted, also wake up the kthread servicing this CPU's queues.
1924 */
1925static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
1926				    struct rcu_head *rhp,
1927				    struct rcu_head **rhtp,
1928				    int rhcount, int rhcount_lazy,
1929				    unsigned long flags)
1930{
1931	int len;
1932	struct rcu_head **old_rhpp;
1933	struct task_struct *t;
1934
1935	/* Enqueue the callback on the nocb list and update counts. */
1936	atomic_long_add(rhcount, &rdp->nocb_q_count);
1937	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
1938	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
1939	WRITE_ONCE(*old_rhpp, rhp);
1940	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
1941	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
1942
1943	/* If we are not being polled and there is a kthread, awaken it ... */
1944	t = READ_ONCE(rdp->nocb_kthread);
1945	if (rcu_nocb_poll || !t) {
1946		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1947				    TPS("WakeNotPoll"));
1948		return;
1949	}
1950	len = atomic_long_read(&rdp->nocb_q_count);
1951	if (old_rhpp == &rdp->nocb_head) {
1952		if (!irqs_disabled_flags(flags)) {
1953			/* ... if queue was empty ... */
1954			wake_nocb_leader(rdp, false);
1955			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1956					    TPS("WakeEmpty"));
1957		} else {
1958			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
1959					       TPS("WakeEmptyIsDeferred"));
1960		}
1961		rdp->qlen_last_fqs_check = 0;
1962	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
1963		/* ... or if many callbacks queued. */
1964		if (!irqs_disabled_flags(flags)) {
1965			wake_nocb_leader(rdp, true);
1966			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1967					    TPS("WakeOvf"));
1968		} else {
1969			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
1970					       TPS("WakeOvfIsDeferred"));
1971		}
1972		rdp->qlen_last_fqs_check = LONG_MAX / 2;
1973	} else {
1974		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
1975	}
1976	return;
1977}
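/*
 * Aside, a sketch rather than kernel code: the xchg()-based enqueue
 * above is a Vyukov-style multi-producer queue.  Publishing the new
 * ->nocb_tail *before* filling in the old tail's ->next pointer means
 * a reader can transiently observe a NULL ->next that is not the true
 * end of the list; rcu_nocb_kthread() below copes with exactly that
 * window.  A minimal userspace analogue using C11 atomics (all names
 * here are hypothetical, not kernel APIs):
 */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *_Atomic next;
};

struct mpsc {
	struct node *_Atomic head;		/* NULL when empty */
	struct node *_Atomic *_Atomic tail;	/* &head when empty */
};

/* Multiple producers may race here; compare with the xchg() above. */
static void mpsc_enqueue(struct mpsc *q, struct node *n)
{
	struct node *_Atomic *old_tail;

	atomic_store(&n->next, NULL);
	/* Step 1: publish the new tail (the xchg() above). */
	old_tail = atomic_exchange(&q->tail, &n->next);
	/* Step 2: link the old tail to the new node (the WRITE_ONCE()
	 * above).  Between steps 1 and 2 the list is transiently
	 * disconnected, hence the consumer-side wait loop. */
	atomic_store(old_tail, n);
}

/* Single consumer: detach everything, then walk the detached segment,
 * spinning across any producer caught between steps 1 and 2. */
static int mpsc_drain(struct mpsc *q)
{
	struct node *list, *next;
	struct node *_Atomic *tail;
	int n = 0;

	list = atomic_exchange(&q->head, NULL);
	tail = atomic_exchange(&q->tail, &q->head);
	while (list) {
		while (!(next = atomic_load(&list->next)) &&
		       &list->next != tail)
			;	/* a producer is mid-enqueue */
		n++;
		list = next;
	}
	return n;
}

int main(void)
{
	struct mpsc q;
	struct node a, b;

	atomic_init(&q.head, NULL);
	atomic_init(&q.tail, &q.head);
	mpsc_enqueue(&q, &a);
	mpsc_enqueue(&q, &b);
	assert(mpsc_drain(&q) == 2);
	return 0;
}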
1978
1979/*
1980 * This is a helper for __call_rcu(), which invokes this when the normal
1981 * callback queue is inoperable.  If this is not a no-CBs CPU, this
1982 * function returns failure back to __call_rcu(), which can complain
1983 * appropriately.
1984 *
1985 * Otherwise, this function queues the callback where the corresponding
1986 * "rcuo" kthread can find it.
1987 */
1988static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
1989			    bool lazy, unsigned long flags)
1990{
1991
1992	if (!rcu_is_nocb_cpu(rdp->cpu))
1993		return false;
1994	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
1995	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
1996		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
1997					 (unsigned long)rhp->func,
1998					 -atomic_long_read(&rdp->nocb_q_count_lazy),
1999					 -atomic_long_read(&rdp->nocb_q_count));
2000	else
2001		trace_rcu_callback(rdp->rsp->name, rhp,
2002				   -atomic_long_read(&rdp->nocb_q_count_lazy),
2003				   -atomic_long_read(&rdp->nocb_q_count));
2004
2005	/*
2006	 * If called from an extended quiescent state with interrupts
2007	 * disabled, invoke the RCU core in order to allow the idle-entry
2008	 * deferred-wakeup check to function.
2009	 */
2010	if (irqs_disabled_flags(flags) &&
2011	    !rcu_is_watching() &&
2012	    cpu_online(smp_processor_id()))
2013		invoke_rcu_core();
2014
2015	return true;
2016}
2017
2018/*
2019 * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
2020 * not a no-CBs CPU.
2021 */
2022static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
2023						     struct rcu_data *rdp,
2024						     unsigned long flags)
2025{
2026	lockdep_assert_irqs_disabled();
2027	if (!rcu_is_nocb_cpu(smp_processor_id()))
2028		return false; /* Not NOCBs CPU, caller must migrate CBs. */
2029	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
2030				rcu_segcblist_tail(&rdp->cblist),
2031				rcu_segcblist_n_cbs(&rdp->cblist),
2032				rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags);
2033	rcu_segcblist_init(&rdp->cblist);
2034	rcu_segcblist_disable(&rdp->cblist);
2035	return true;
2036}
2037
2038/*
2039 * If necessary, kick off a new grace period, and either way wait
2040 * for a subsequent grace period to complete.
2041 */
2042static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2043{
2044	unsigned long c;
2045	bool d;
2046	unsigned long flags;
2047	bool needwake;
2048	struct rcu_node *rnp = rdp->mynode;
2049
2050	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2051	needwake = rcu_start_future_gp(rnp, rdp, &c);
2052	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2053	if (needwake)
2054		rcu_gp_kthread_wake(rdp->rsp);
2055
2056	/*
2057	 * Wait for the grace period.  Do so interruptibly to avoid messing
2058	 * up the load average.
2059	 */
2060	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2061	for (;;) {
2062		swait_event_interruptible(
2063			rnp->nocb_gp_wq[c & 0x1],
2064			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
2065		if (likely(d))
2066			break;
2067		WARN_ON(signal_pending(current));
2068		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
2069	}
2070	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
2071	smp_mb(); /* Ensure that CB invocation happens after GP end. */
2072}
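/*
 * Aside: the wait above is keyed by ->nocb_gp_wq[c & 0x1] because at
 * most two future grace-period numbers can be outstanding at a time,
 * so the low-order bit of the GP number suffices to pick a queue.
 * The wakeup condition uses ULONG_CMP_GE() so the comparison stays
 * correct when ->completed wraps around.  A small standalone check of
 * both properties (the macro body mirrors the definition in the RCU
 * internal headers):
 */
#include <assert.h>
#include <limits.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long c = ULONG_MAX;		/* GP number about to wrap */

	assert(ULONG_CMP_GE(c, c));		/* "completed >= c" holds */
	assert(ULONG_CMP_GE(c + 1, c));		/* 0 counts as after ULONG_MAX */
	assert(!ULONG_CMP_GE(c, c + 1));	/* ... but not vice versa */
	assert(((c & 0x1) ^ ((c + 1) & 0x1)) == 1);	/* GPs alternate queues */
	return 0;
}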
2073
2074/*
2075 * Leaders come here to wait for additional callbacks to show up.
2076 * This function does not return until callbacks appear.
2077 */
2078static void nocb_leader_wait(struct rcu_data *my_rdp)
2079{
2080	bool firsttime = true;
2081	unsigned long flags;
2082	bool gotcbs;
2083	struct rcu_data *rdp;
2084	struct rcu_head **tail;
2085
2086wait_again:
2087
2088	/* Wait for callbacks to appear. */
2089	if (!rcu_nocb_poll) {
2090		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
2091		swait_event_interruptible(my_rdp->nocb_wq,
2092				!READ_ONCE(my_rdp->nocb_leader_sleep));
2093		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
2094		my_rdp->nocb_leader_sleep = true;
2095		WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2096		del_timer(&my_rdp->nocb_timer);
2097		raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
2098	} else if (firsttime) {
2099		firsttime = false; /* Don't drown trace log with "Poll"! */
2100		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
2101	}
2102
2103	/*
2104	 * Each pass through the following loop checks a follower for CBs.
2105	 * We are our own first follower.  Any CBs found are moved to
2106	 * nocb_gp_head, where they await a grace period.
2107	 */
2108	gotcbs = false;
2109	smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */
2110	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2111		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
2112		if (!rdp->nocb_gp_head)
2113			continue;  /* No CBs here, try next follower. */
2114
2115		/* Move callbacks to wait-for-GP list, which is empty. */
2116		WRITE_ONCE(rdp->nocb_head, NULL);
2117		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2118		gotcbs = true;
2119	}
2120
2121	/* No callbacks?  Sleep a bit if polling, and go retry.  */
2122	if (unlikely(!gotcbs)) {
2123		WARN_ON(signal_pending(current));
2124		if (rcu_nocb_poll) {
2125			schedule_timeout_interruptible(1);
2126		} else {
2127			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2128					    TPS("WokeEmpty"));
2129		}
2130		goto wait_again;
2131	}
2132
2133	/* Wait for one grace period. */
2134	rcu_nocb_wait_gp(my_rdp);
2135
2136	/* Each pass through the following loop wakes a follower, if needed. */
2137	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2138		if (!rcu_nocb_poll &&
2139		    READ_ONCE(rdp->nocb_head) &&
2140		    READ_ONCE(my_rdp->nocb_leader_sleep)) {
2141			raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
2142			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
2143			raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
2144		}
2145		if (!rdp->nocb_gp_head)
2146			continue; /* No CBs, so no need to wake follower. */
2147
2148		/* Append callbacks to follower's "done" list. */
2149		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2150		tail = rdp->nocb_follower_tail;
2151		rdp->nocb_follower_tail = rdp->nocb_gp_tail;
2152		*tail = rdp->nocb_gp_head;
2153		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2154		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2155			/* List was empty, so wake up the follower.  */
2156			swake_up(&rdp->nocb_wq);
2157		}
2158	}
2159
2160	/* If we (the leader) don't have CBs, go wait some more. */
2161	if (!my_rdp->nocb_follower_head)
2162		goto wait_again;
2163}
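/*
 * Aside: the "append to the follower's done list" step above is a
 * constant-time splice of one singly linked segment onto another,
 * made possible by tracking each list's tail as a pointer to the
 * final ->next slot.  A minimal userspace sketch (hypothetical names,
 * single-threaded, so the ->nocb_lock is omitted):
 */
#include <assert.h>
#include <stddef.h>

struct cb {
	struct cb *next;
};

struct seg {
	struct cb *head;
	struct cb **tail;	/* points at the last ->next, or at head */
};

static void seg_init(struct seg *s)
{
	s->head = NULL;
	s->tail = &s->head;
}

/* O(1) splice of all of @src onto the end of @dst, mirroring:
 *	tail = rdp->nocb_follower_tail;
 *	rdp->nocb_follower_tail = rdp->nocb_gp_tail;
 *	*tail = rdp->nocb_gp_head;
 */
static void seg_splice(struct seg *dst, struct seg *src)
{
	struct cb **tail = dst->tail;

	if (!src->head)
		return;		/* the kernel checks ->nocb_gp_head first */
	dst->tail = src->tail;
	*tail = src->head;
	seg_init(src);
}

int main(void)
{
	struct seg done, gp;
	struct cb a = { NULL }, b = { NULL };

	seg_init(&done);
	seg_init(&gp);
	*gp.tail = &a;
	gp.tail = &a.next;
	*gp.tail = &b;
	gp.tail = &b.next;
	seg_splice(&done, &gp);
	assert(done.head == &a && a.next == &b && done.tail == &b.next);
	return 0;
}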
2164
2165/*
2166 * Followers come here to wait for additional callbacks to show up.
2167 * This function does not return until callbacks appear.
2168 */
2169static void nocb_follower_wait(struct rcu_data *rdp)
2170{
2171	for (;;) {
2172		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
2173		swait_event_interruptible(rdp->nocb_wq,
2174					 READ_ONCE(rdp->nocb_follower_head));
2175		if (smp_load_acquire(&rdp->nocb_follower_head)) {
2176			/* ^^^ Ensure CB invocation follows _head test. */
2177			return;
2178		}
2179		WARN_ON(signal_pending(current));
2180		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
2181	}
2182}
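/*
 * Aside: the smp_load_acquire() above pairs with the leader's release
 * of ->nocb_lock after it fills ->nocb_follower_head, guaranteeing
 * that the follower's reads of the callbacks themselves cannot be
 * reordered before the non-NULL head test.  The same publish/consume
 * pattern in standalone C11 form (hypothetical names; build with
 * -pthread):
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct msg {
	int payload;
};

static struct msg m;
static struct msg *_Atomic head;	/* NULL until published */

static void *producer(void *arg)
{
	m.payload = 42;			/* write the data first ... */
	/* ... then publish it; release orders the payload store before
	 * the pointer store becomes visible. */
	atomic_store_explicit(&head, &m, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct msg *p;

	assert(pthread_create(&t, NULL, producer, NULL) == 0);
	/* Acquire pairs with the release above: once head is seen
	 * non-NULL, the payload write is guaranteed to be visible,
	 * mirroring smp_load_acquire() in nocb_follower_wait(). */
	while (!(p = atomic_load_explicit(&head, memory_order_acquire)))
		;
	assert(p->payload == 42);
	pthread_join(t, NULL);
	return 0;
}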
2183
2184/*
2185 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2186 * callbacks queued by the corresponding no-CBs CPU; however, there is
2187 * an optional leader-follower relationship so that the grace-period
2188 * kthreads don't have to do quite so many wakeups.
2189 */
2190static int rcu_nocb_kthread(void *arg)
2191{
2192	int c, cl;
2193	unsigned long flags;
2194	struct rcu_head *list;
2195	struct rcu_head *next;
2196	struct rcu_head **tail;
2197	struct rcu_data *rdp = arg;
2198
2199	/* Each pass through this loop invokes one batch of callbacks */
2200	for (;;) {
2201		/* Wait for callbacks. */
2202		if (rdp->nocb_leader == rdp)
2203			nocb_leader_wait(rdp);
2204		else
2205			nocb_follower_wait(rdp);
2206
2207		/* Pull the ready-to-invoke callbacks onto local list. */
2208		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2209		list = rdp->nocb_follower_head;
2210		rdp->nocb_follower_head = NULL;
2211		tail = rdp->nocb_follower_tail;
2212		rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2213		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2214		BUG_ON(!list);
2215		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
2216
2217		/* Each pass through the following loop invokes a callback. */
2218		trace_rcu_batch_start(rdp->rsp->name,
2219				      atomic_long_read(&rdp->nocb_q_count_lazy),
2220				      atomic_long_read(&rdp->nocb_q_count), -1);
2221		c = cl = 0;
2222		while (list) {
2223			next = list->next;
2224			/* Wait for enqueuing to complete, if needed. */
2225			while (next == NULL && &list->next != tail) {
2226				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2227						    TPS("WaitQueue"));
2228				schedule_timeout_interruptible(1);
2229				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2230						    TPS("WokeQueue"));
2231				next = list->next;
2232			}
2233			debug_rcu_head_unqueue(list);
2234			local_bh_disable();
2235			if (__rcu_reclaim(rdp->rsp->name, list))
2236				cl++;
2237			c++;
2238			local_bh_enable();
2239			cond_resched_rcu_qs();
2240			list = next;
2241		}
2242		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2243		smp_mb__before_atomic();  /* _add after CB invocation. */
2244		atomic_long_add(-c, &rdp->nocb_q_count);
2245		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2246	}
2247	return 0;
2248}
2249
2250/* Is a deferred wakeup of rcu_nocb_kthread() required? */
2251static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2252{
2253	return READ_ONCE(rdp->nocb_defer_wakeup);
2254}
2255
2256/* Do a deferred wakeup of rcu_nocb_kthread(). */
2257static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
2258{
2259	unsigned long flags;
2260	int ndw;
2261
2262	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2263	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
2264		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2265		return;
2266	}
2267	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
2268	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2269	__wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
2270	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
2271}
2272
2273/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
2274static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
2275{
2276	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
2277
2278	do_nocb_deferred_wakeup_common(rdp);
2279}
2280
2281/*
2282 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
2283 * This means we do an inexact common-case check.  Note that if
2284 * we miss, ->nocb_timer will eventually clean things up.
2285 */
2286static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2287{
2288	if (rcu_nocb_need_deferred_wakeup(rdp))
2289		do_nocb_deferred_wakeup_common(rdp);
2290}
2291
2292void __init rcu_init_nohz(void)
2293{
2294	int cpu;
2295	bool need_rcu_nocb_mask = false;
2296	struct rcu_state *rsp;
2297
2298#if defined(CONFIG_NO_HZ_FULL)
2299	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2300		need_rcu_nocb_mask = true;
2301#endif /* #if defined(CONFIG_NO_HZ_FULL) */
2302
2303	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
2304		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2305			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2306			return;
2307		}
2308	}
2309	if (!cpumask_available(rcu_nocb_mask))
2310		return;
2311
2312#if defined(CONFIG_NO_HZ_FULL)
2313	if (tick_nohz_full_running)
2314		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2315#endif /* #if defined(CONFIG_NO_HZ_FULL) */
2316
2317	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2318		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
2319		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2320			    rcu_nocb_mask);
2321	}
2322	if (cpumask_empty(rcu_nocb_mask))
2323		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
2324	else
2325		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
2326			cpumask_pr_args(rcu_nocb_mask));
2327	if (rcu_nocb_poll)
2328		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2329
2330	for_each_rcu_flavor(rsp) {
2331		for_each_cpu(cpu, rcu_nocb_mask)
2332			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
2333		rcu_organize_nocb_kthreads(rsp);
2334	}
2335}
2336
2337/* Initialize per-rcu_data variables for no-CBs CPUs. */
2338static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2339{
2340	rdp->nocb_tail = &rdp->nocb_head;
2341	init_swait_queue_head(&rdp->nocb_wq);
2342	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2343	raw_spin_lock_init(&rdp->nocb_lock);
2344	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
2345}
2346
2347/*
2348 * If the specified CPU is a no-CBs CPU that does not already have its
2349 * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
2350 * brought online out of order, this can require re-organizing the
2351 * leader-follower relationships.
2352 */
2353static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
2354{
2355	struct rcu_data *rdp;
2356	struct rcu_data *rdp_last;
2357	struct rcu_data *rdp_old_leader;
2358	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
2359	struct task_struct *t;
2360
2361	/*
2362	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2363	 * then nothing to do.
2364	 */
2365	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
2366		return;
2367
2368	/* If we didn't spawn the leader first, reorganize! */
2369	rdp_old_leader = rdp_spawn->nocb_leader;
2370	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
2371		rdp_last = NULL;
2372		rdp = rdp_old_leader;
2373		do {
2374			rdp->nocb_leader = rdp_spawn;
2375			if (rdp_last && rdp != rdp_spawn)
2376				rdp_last->nocb_next_follower = rdp;
2377			if (rdp == rdp_spawn) {
2378				rdp = rdp->nocb_next_follower;
2379			} else {
2380				rdp_last = rdp;
2381				rdp = rdp->nocb_next_follower;
2382				rdp_last->nocb_next_follower = NULL;
2383			}
2384		} while (rdp);
2385		rdp_spawn->nocb_next_follower = rdp_old_leader;
2386	}
2387
2388	/* Spawn the kthread for this CPU and RCU flavor. */
2389	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2390			"rcuo%c/%d", rsp->abbr, cpu);
2391	BUG_ON(IS_ERR(t));
2392	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
2393}
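/*
 * Worked example of the reorganization above (hypothetical CPU
 * numbering): suppose the boot-time chain is leader L0 with followers
 * F1, F2, F3, and F2's kthread is spawned before L0's.  The do/while
 * loop walks L0 -> F1 -> F2 -> F3, points every ->nocb_leader at F2,
 * unlinks F2 itself, and the final assignment prepends the old chain
 * as F2's followers:
 *
 *	before:	L0 -> F1 -> F2 -> F3	(leader L0, no kthread yet)
 *	after:	F2 -> L0 -> F1 -> F3	(leader F2, kthread running)
 */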
2394
2395/*
2396 * If the specified CPU is a no-CBs CPU that does not already have its
2397 * rcuo kthreads, spawn them.
2398 */
2399static void rcu_spawn_all_nocb_kthreads(int cpu)
2400{
2401	struct rcu_state *rsp;
2402
2403	if (rcu_scheduler_fully_active)
2404		for_each_rcu_flavor(rsp)
2405			rcu_spawn_one_nocb_kthread(rsp, cpu);
2406}
2407
2408/*
2409 * Once the scheduler is running, spawn rcuo kthreads for all online
2410 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2411 * non-boot CPUs come online -- if this changes, we will need to add
2412 * some mutual exclusion.
2413 */
2414static void __init rcu_spawn_nocb_kthreads(void)
2415{
2416	int cpu;
2417
2418	for_each_online_cpu(cpu)
2419		rcu_spawn_all_nocb_kthreads(cpu);
2420}
2421
2422/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
2423static int rcu_nocb_leader_stride = -1;
2424module_param(rcu_nocb_leader_stride, int, 0444);
2425
2426/*
2427 * Initialize leader-follower relationships for all no-CBs CPUs.
2428 */
2429static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
2430{
2431	int cpu;
2432	int ls = rcu_nocb_leader_stride;
2433	int nl = 0;  /* Next leader. */
2434	struct rcu_data *rdp;
2435	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
2436	struct rcu_data *rdp_prev = NULL;
2437
2438	if (!cpumask_available(rcu_nocb_mask))
2439		return;
2440	if (ls == -1) {
2441		ls = int_sqrt(nr_cpu_ids);
2442		rcu_nocb_leader_stride = ls;
2443	}
2444
2445	/*
2446	 * Each pass through this loop sets up one rcu_data structure.
2447	 * Should the corresponding CPU come online in the future, then
2448	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
2449	 */
2450	for_each_cpu(cpu, rcu_nocb_mask) {
2451		rdp = per_cpu_ptr(rsp->rda, cpu);
2452		if (rdp->cpu >= nl) {
2453			/* New leader, set up for followers & next leader. */
2454			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2455			rdp->nocb_leader = rdp;
2456			rdp_leader = rdp;
2457		} else {
2458			/* Another follower, link to previous leader. */
2459			rdp->nocb_leader = rdp_leader;
2460			rdp_prev->nocb_next_follower = rdp;
2461		}
2462		rdp_prev = rdp;
2463	}
2464}
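/*
 * Worked example of the stride arithmetic above, runnable standalone.
 * The CPU count, the contiguous no-CBs mask, and all names below are
 * assumptions of this sketch; DIV_ROUND_UP() mirrors the kernel macro.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpu_ids = 16;	/* hypothetical box: int_sqrt(16) == 4 */
	int ls = 4;
	int nl = 0;
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (cpu >= nl) {
			nl = DIV_ROUND_UP(cpu + 1, ls) * ls;
			printf("CPU %2d: leader\n", cpu);
		} else {
			printf("CPU %2d: follower of CPU %d\n",
			       cpu, cpu / ls * ls);
		}
	}
	/* Prints leaders at CPUs 0, 4, 8, and 12: roughly sqrt(N)
	 * leaders, so the grace-period kthread wakes only sqrt(N)
	 * rcuo kthreads instead of N. */
	return 0;
}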
2465
2466/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2467static bool init_nocb_callback_list(struct rcu_data *rdp)
2468{
2469	if (!rcu_is_nocb_cpu(rdp->cpu))
2470		return false;
2471
2472	/* If there are early-boot callbacks, move them to nocb lists. */
2473	if (!rcu_segcblist_empty(&rdp->cblist)) {
2474		rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
2475		rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
2476		atomic_long_set(&rdp->nocb_q_count,
2477				rcu_segcblist_n_cbs(&rdp->cblist));
2478		atomic_long_set(&rdp->nocb_q_count_lazy,
2479				rcu_segcblist_n_lazy_cbs(&rdp->cblist));
2480		rcu_segcblist_init(&rdp->cblist);
2481	}
2482	rcu_segcblist_disable(&rdp->cblist);
2483	return true;
2484}
2485
2486#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2487
2488static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2489{
2490	WARN_ON_ONCE(1); /* Should be dead code. */
2491	return false;
2492}
2493
2494static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
2495{
2496}
2497
2498static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2499{
2500}
2501
2502static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
2503{
2504	return NULL;
2505}
2506
2507static void rcu_init_one_nocb(struct rcu_node *rnp)
2508{
2509}
2510
2511static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2512			    bool lazy, unsigned long flags)
2513{
2514	return false;
2515}
2516
2517static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
2518						     struct rcu_data *rdp,
2519						     unsigned long flags)
2520{
2521	return false;
2522}
2523
2524static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2525{
2526}
2527
2528static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2529{
2530	return false;
2531}
2532
2533static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2534{
2535}
2536
2537static void rcu_spawn_all_nocb_kthreads(int cpu)
2538{
2539}
2540
2541static void __init rcu_spawn_nocb_kthreads(void)
2542{
2543}
2544
2545static bool init_nocb_callback_list(struct rcu_data *rdp)
2546{
2547	return false;
2548}
2549
2550#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2551
2552/*
2553 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2554 * arbitrarily long period of time with the scheduling-clock tick turned
2555 * off.  RCU will be paying attention to this CPU because it is in the
2556 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2557 * machine because the scheduling-clock tick has been disabled.  Therefore,
2558 * if an adaptive-ticks CPU is failing to respond to the current grace
2559 * period and has not been idle from an RCU perspective, kick it.
2560 */
2561static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2562{
2563#ifdef CONFIG_NO_HZ_FULL
2564	if (tick_nohz_full_cpu(cpu))
2565		smp_send_reschedule(cpu);
2566#endif /* #ifdef CONFIG_NO_HZ_FULL */
2567}
2568
2569/*
2570 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
2571 * grace-period kthread will do force_quiescent_state() processing?
2572 * The idea is to avoid waking up RCU core processing on such a
2573 * CPU unless the grace period has extended for too long.
2574 *
2575 * This code relies on the fact that all NO_HZ_FULL CPUs are also
2576 * CONFIG_RCU_NOCB_CPU CPUs.
2577 */
2578static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
2579{
2580#ifdef CONFIG_NO_HZ_FULL
2581	if (tick_nohz_full_cpu(smp_processor_id()) &&
2582	    (!rcu_gp_in_progress(rsp) ||
2583	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
2584		return true;
2585#endif /* #ifdef CONFIG_NO_HZ_FULL */
2586	return false;
2587}
2588
2589/*
2590 * Bind the RCU grace-period kthreads to the housekeeping CPUs,
2591 * as designated by HK_FLAG_RCU.
2592 */
2593static void rcu_bind_gp_kthread(void)
2594{
2595	int __maybe_unused cpu;
2596
2597	if (!tick_nohz_full_enabled())
2598		return;
2599	housekeeping_affine(current, HK_FLAG_RCU);
2600}
2601
2602/* Record the current task on dyntick-idle entry. */
2603static void rcu_dynticks_task_enter(void)
2604{
2605#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2606	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
2607#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2608}
2609
2610/* Record no current task on dyntick-idle exit. */
2611static void rcu_dynticks_task_exit(void)
2612{
2613#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2614	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
2615#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2616}