Loading...
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 *
5 * Copyright IBM Corporation, 2008
6 *
7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8 * Manfred Spraul <manfred@colorfullife.com>
9 * Paul E. McKenney <paulmck@linux.ibm.com>
10 *
11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13 *
14 * For detailed explanation of Read-Copy Update mechanism see -
15 * Documentation/RCU
16 */
17
18#define pr_fmt(fmt) "rcu: " fmt
19
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/spinlock.h>
24#include <linux/smp.h>
25#include <linux/rcupdate_wait.h>
26#include <linux/interrupt.h>
27#include <linux/sched.h>
28#include <linux/sched/debug.h>
29#include <linux/nmi.h>
30#include <linux/atomic.h>
31#include <linux/bitops.h>
32#include <linux/export.h>
33#include <linux/completion.h>
34#include <linux/moduleparam.h>
35#include <linux/panic.h>
36#include <linux/panic_notifier.h>
37#include <linux/percpu.h>
38#include <linux/notifier.h>
39#include <linux/cpu.h>
40#include <linux/mutex.h>
41#include <linux/time.h>
42#include <linux/kernel_stat.h>
43#include <linux/wait.h>
44#include <linux/kthread.h>
45#include <uapi/linux/sched/types.h>
46#include <linux/prefetch.h>
47#include <linux/delay.h>
48#include <linux/random.h>
49#include <linux/trace_events.h>
50#include <linux/suspend.h>
51#include <linux/ftrace.h>
52#include <linux/tick.h>
53#include <linux/sysrq.h>
54#include <linux/kprobes.h>
55#include <linux/gfp.h>
56#include <linux/oom.h>
57#include <linux/smpboot.h>
58#include <linux/jiffies.h>
59#include <linux/slab.h>
60#include <linux/sched/isolation.h>
61#include <linux/sched/clock.h>
62#include <linux/vmalloc.h>
63#include <linux/mm.h>
64#include <linux/kasan.h>
65#include <linux/context_tracking.h>
66#include "../time/tick-internal.h"
67
68#include "tree.h"
69#include "rcu.h"
70
71#ifdef MODULE_PARAM_PREFIX
72#undef MODULE_PARAM_PREFIX
73#endif
74#define MODULE_PARAM_PREFIX "rcutree."
75
76/* Data structures. */
77
78static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
79 .gpwrap = true,
80#ifdef CONFIG_RCU_NOCB_CPU
81 .cblist.flags = SEGCBLIST_RCU_CORE,
82#endif
83};
84static struct rcu_state rcu_state = {
85 .level = { &rcu_state.node[0] },
86 .gp_state = RCU_GP_IDLE,
87 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
88 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
89 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
90 .name = RCU_NAME,
91 .abbr = RCU_ABBR,
92 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
93 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
94 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
95};
96
97/* Dump rcu_node combining tree at boot to verify correct setup. */
98static bool dump_tree;
99module_param(dump_tree, bool, 0444);
100/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
101static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
102#ifndef CONFIG_PREEMPT_RT
103module_param(use_softirq, bool, 0444);
104#endif
105/* Control rcu_node-tree auto-balancing at boot time. */
106static bool rcu_fanout_exact;
107module_param(rcu_fanout_exact, bool, 0444);
108/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110module_param(rcu_fanout_leaf, int, 0444);
111int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112/* Number of rcu_nodes at specified level. */
113int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
115
116/*
117 * The rcu_scheduler_active variable is initialized to the value
118 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
119 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
120 * RCU can assume that there is but one task, allowing RCU to (for example)
121 * optimize synchronize_rcu() to a simple barrier(). When this variable
122 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123 * to detect real grace periods. This variable is also used to suppress
124 * boot-time false positives from lockdep-RCU error checking. Finally, it
125 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126 * is fully initialized, including all of its kthreads having been spawned.
127 */
128int rcu_scheduler_active __read_mostly;
129EXPORT_SYMBOL_GPL(rcu_scheduler_active);
130
131/*
132 * The rcu_scheduler_fully_active variable transitions from zero to one
133 * during the early_initcall() processing, which is after the scheduler
134 * is capable of creating new tasks. So RCU processing (for example,
135 * creating tasks for RCU priority boosting) must be delayed until after
136 * rcu_scheduler_fully_active transitions from zero to one. We also
137 * currently delay invocation of any RCU callbacks until after this point.
138 *
139 * It might later prove better for people registering RCU callbacks during
140 * early boot to take responsibility for these callbacks, but one step at
141 * a time.
142 */
143static int rcu_scheduler_fully_active __read_mostly;
144
145static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 unsigned long gps, unsigned long flags);
147static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150static void invoke_rcu_core(void);
151static void rcu_report_exp_rdp(struct rcu_data *rdp);
152static void sync_sched_exp_online_cleanup(int cpu);
153static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
155
156/*
157 * rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
158 * real-time priority(enabling/disabling) is controlled by
159 * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration.
160 */
161static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
162module_param(kthread_prio, int, 0444);
163
164/* Delay in jiffies for grace-period initialization delays, debug only. */
165
166static int gp_preinit_delay;
167module_param(gp_preinit_delay, int, 0444);
168static int gp_init_delay;
169module_param(gp_init_delay, int, 0444);
170static int gp_cleanup_delay;
171module_param(gp_cleanup_delay, int, 0444);
172
173// Add delay to rcu_read_unlock() for strict grace periods.
174static int rcu_unlock_delay;
175#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
176module_param(rcu_unlock_delay, int, 0444);
177#endif
178
179/*
180 * This rcu parameter is runtime-read-only. It reflects
181 * a minimum allowed number of objects which can be cached
182 * per-CPU. Object size is equal to one page. This value
183 * can be changed at boot time.
184 */
185static int rcu_min_cached_objs = 5;
186module_param(rcu_min_cached_objs, int, 0444);
187
188// A page shrinker can ask for pages to be freed to make them
189// available for other parts of the system. This usually happens
190// under low memory conditions, and in that case we should also
191// defer page-cache filling for a short time period.
192//
193// The default value is 5 seconds, which is long enough to reduce
194// interference with the shrinker while it asks other systems to
195// drain their caches.
196static int rcu_delay_page_cache_fill_msec = 5000;
197module_param(rcu_delay_page_cache_fill_msec, int, 0444);
198
199/* Retrieve RCU kthreads priority for rcutorture */
200int rcu_get_gp_kthreads_prio(void)
201{
202 return kthread_prio;
203}
204EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
205
206/*
207 * Number of grace periods between delays, normalized by the duration of
208 * the delay. The longer the delay, the more the grace periods between
209 * each delay. The reason for this normalization is that it means that,
210 * for non-zero delays, the overall slowdown of grace periods is constant
211 * regardless of the duration of the delay. This arrangement balances
212 * the need for long delays to increase some race probabilities with the
213 * need for fast grace periods to increase other race probabilities.
214 */
215#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
216
217/*
218 * Compute the mask of online CPUs for the specified rcu_node structure.
219 * This will not be stable unless the rcu_node structure's ->lock is
220 * held, but the bit corresponding to the current CPU will be stable
221 * in most contexts.
222 */
223static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
224{
225 return READ_ONCE(rnp->qsmaskinitnext);
226}
227
228/*
229 * Is the CPU corresponding to the specified rcu_data structure online
230 * from RCU's perspective? This perspective is given by that structure's
231 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
232 */
233static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
234{
235 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
236}
237
238/*
239 * Return true if an RCU grace period is in progress. The READ_ONCE()s
240 * permit this function to be invoked without holding the root rcu_node
241 * structure's ->lock, but of course results can be subject to change.
242 */
243static int rcu_gp_in_progress(void)
244{
245 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
246}
247
248/*
249 * Return the number of callbacks queued on the specified CPU.
250 * Handles both the nocbs and normal cases.
251 */
252static long rcu_get_n_cbs_cpu(int cpu)
253{
254 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
255
256 if (rcu_segcblist_is_enabled(&rdp->cblist))
257 return rcu_segcblist_n_cbs(&rdp->cblist);
258 return 0;
259}
260
261void rcu_softirq_qs(void)
262{
263 rcu_qs();
264 rcu_preempt_deferred_qs(current);
265 rcu_tasks_qs(current, false);
266}
267
268/*
269 * Reset the current CPU's ->dynticks counter to indicate that the
270 * newly onlined CPU is no longer in an extended quiescent state.
271 * This will either leave the counter unchanged, or increment it
272 * to the next non-quiescent value.
273 *
274 * The non-atomic test/increment sequence works because the upper bits
275 * of the ->dynticks counter are manipulated only by the corresponding CPU,
276 * or when the corresponding CPU is offline.
277 */
278static void rcu_dynticks_eqs_online(void)
279{
280 if (ct_dynticks() & RCU_DYNTICKS_IDX)
281 return;
282 ct_state_inc(RCU_DYNTICKS_IDX);
283}
284
285/*
286 * Snapshot the ->dynticks counter with full ordering so as to allow
287 * stable comparison of this counter with past and future snapshots.
288 */
289static int rcu_dynticks_snap(int cpu)
290{
291 smp_mb(); // Fundamental RCU ordering guarantee.
292 return ct_dynticks_cpu_acquire(cpu);
293}
294
295/*
296 * Return true if the snapshot returned from rcu_dynticks_snap()
297 * indicates that RCU is in an extended quiescent state.
298 */
299static bool rcu_dynticks_in_eqs(int snap)
300{
301 return !(snap & RCU_DYNTICKS_IDX);
302}
303
304/*
305 * Return true if the CPU corresponding to the specified rcu_data
306 * structure has spent some time in an extended quiescent state since
307 * rcu_dynticks_snap() returned the specified snapshot.
308 */
309static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
310{
311 return snap != rcu_dynticks_snap(rdp->cpu);
312}
313
314/*
315 * Return true if the referenced integer is zero while the specified
316 * CPU remains within a single extended quiescent state.
317 */
318bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
319{
320 int snap;
321
322 // If not quiescent, force back to earlier extended quiescent state.
323 snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
324 smp_rmb(); // Order ->dynticks and *vp reads.
325 if (READ_ONCE(*vp))
326 return false; // Non-zero, so report failure;
327 smp_rmb(); // Order *vp read and ->dynticks re-read.
328
329 // If still in the same extended quiescent state, we are good!
330 return snap == ct_dynticks_cpu(cpu);
331}
332
333/*
334 * Let the RCU core know that this CPU has gone through the scheduler,
335 * which is a quiescent state. This is called when the need for a
336 * quiescent state is urgent, so we burn an atomic operation and full
337 * memory barriers to let the RCU core know about it, regardless of what
338 * this CPU might (or might not) do in the near future.
339 *
340 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
341 *
342 * The caller must have disabled interrupts and must not be idle.
343 */
344notrace void rcu_momentary_dyntick_idle(void)
345{
346 int seq;
347
348 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
349 seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
350 /* It is illegal to call this from idle state. */
351 WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
352 rcu_preempt_deferred_qs(current);
353}
354EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
355
356/**
357 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
358 *
359 * If the current CPU is idle and running at a first-level (not nested)
360 * interrupt, or directly, from idle, return true.
361 *
362 * The caller must have at least disabled IRQs.
363 */
364static int rcu_is_cpu_rrupt_from_idle(void)
365{
366 long nesting;
367
368 /*
369 * Usually called from the tick; but also used from smp_function_call()
370 * for expedited grace periods. This latter can result in running from
371 * the idle task, instead of an actual IPI.
372 */
373 lockdep_assert_irqs_disabled();
374
375 /* Check for counter underflows */
376 RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
377 "RCU dynticks_nesting counter underflow!");
378 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
379 "RCU dynticks_nmi_nesting counter underflow/zero!");
380
381 /* Are we at first interrupt nesting level? */
382 nesting = ct_dynticks_nmi_nesting();
383 if (nesting > 1)
384 return false;
385
386 /*
387 * If we're not in an interrupt, we must be in the idle task!
388 */
389 WARN_ON_ONCE(!nesting && !is_idle_task(current));
390
391 /* Does CPU appear to be idle from an RCU standpoint? */
392 return ct_dynticks_nesting() == 0;
393}
394
395#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
396 // Maximum callbacks per rcu_do_batch ...
397#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
398static long blimit = DEFAULT_RCU_BLIMIT;
399#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
400static long qhimark = DEFAULT_RCU_QHIMARK;
401#define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
402static long qlowmark = DEFAULT_RCU_QLOMARK;
403#define DEFAULT_RCU_QOVLD_MULT 2
404#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
405static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
406static long qovld_calc = -1; // No pre-initialization lock acquisitions!
407
408module_param(blimit, long, 0444);
409module_param(qhimark, long, 0444);
410module_param(qlowmark, long, 0444);
411module_param(qovld, long, 0444);
412
413static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
414static ulong jiffies_till_next_fqs = ULONG_MAX;
415static bool rcu_kick_kthreads;
416static int rcu_divisor = 7;
417module_param(rcu_divisor, int, 0644);
418
419/* Force an exit from rcu_do_batch() after 3 milliseconds. */
420static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
421module_param(rcu_resched_ns, long, 0644);
422
423/*
424 * How long the grace period must be before we start recruiting
425 * quiescent-state help from rcu_note_context_switch().
426 */
427static ulong jiffies_till_sched_qs = ULONG_MAX;
428module_param(jiffies_till_sched_qs, ulong, 0444);
429static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
430module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
431
432/*
433 * Make sure that we give the grace-period kthread time to detect any
434 * idle CPUs before taking active measures to force quiescent states.
435 * However, don't go below 100 milliseconds, adjusted upwards for really
436 * large systems.
437 */
438static void adjust_jiffies_till_sched_qs(void)
439{
440 unsigned long j;
441
442 /* If jiffies_till_sched_qs was specified, respect the request. */
443 if (jiffies_till_sched_qs != ULONG_MAX) {
444 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
445 return;
446 }
447 /* Otherwise, set to third fqs scan, but bound below on large system. */
448 j = READ_ONCE(jiffies_till_first_fqs) +
449 2 * READ_ONCE(jiffies_till_next_fqs);
450 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
451 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
452 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
453 WRITE_ONCE(jiffies_to_sched_qs, j);
454}
455
456static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
457{
458 ulong j;
459 int ret = kstrtoul(val, 0, &j);
460
461 if (!ret) {
462 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
463 adjust_jiffies_till_sched_qs();
464 }
465 return ret;
466}
467
468static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
469{
470 ulong j;
471 int ret = kstrtoul(val, 0, &j);
472
473 if (!ret) {
474 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
475 adjust_jiffies_till_sched_qs();
476 }
477 return ret;
478}
479
480static const struct kernel_param_ops first_fqs_jiffies_ops = {
481 .set = param_set_first_fqs_jiffies,
482 .get = param_get_ulong,
483};
484
485static const struct kernel_param_ops next_fqs_jiffies_ops = {
486 .set = param_set_next_fqs_jiffies,
487 .get = param_get_ulong,
488};
489
490module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
491module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
492module_param(rcu_kick_kthreads, bool, 0644);
493
494static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
495static int rcu_pending(int user);
496
497/*
498 * Return the number of RCU GPs completed thus far for debug & stats.
499 */
500unsigned long rcu_get_gp_seq(void)
501{
502 return READ_ONCE(rcu_state.gp_seq);
503}
504EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
505
506/*
507 * Return the number of RCU expedited batches completed thus far for
508 * debug & stats. Odd numbers mean that a batch is in progress, even
509 * numbers mean idle. The value returned will thus be roughly double
510 * the cumulative batches since boot.
511 */
512unsigned long rcu_exp_batches_completed(void)
513{
514 return rcu_state.expedited_sequence;
515}
516EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
517
518/*
519 * Return the root node of the rcu_state structure.
520 */
521static struct rcu_node *rcu_get_root(void)
522{
523 return &rcu_state.node[0];
524}
525
526/*
527 * Send along grace-period-related data for rcutorture diagnostics.
528 */
529void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
530 unsigned long *gp_seq)
531{
532 switch (test_type) {
533 case RCU_FLAVOR:
534 *flags = READ_ONCE(rcu_state.gp_flags);
535 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
536 break;
537 default:
538 break;
539 }
540}
541EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
542
543#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
544/*
545 * An empty function that will trigger a reschedule on
546 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
547 */
548static void late_wakeup_func(struct irq_work *work)
549{
550}
551
552static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
553 IRQ_WORK_INIT(late_wakeup_func);
554
555/*
556 * If either:
557 *
558 * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
559 * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
560 *
561 * In these cases the late RCU wake ups aren't supported in the resched loops and our
562 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
563 * get re-enabled again.
564 */
565noinstr void rcu_irq_work_resched(void)
566{
567 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
568
569 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
570 return;
571
572 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
573 return;
574
575 instrumentation_begin();
576 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
577 irq_work_queue(this_cpu_ptr(&late_wakeup_work));
578 }
579 instrumentation_end();
580}
581#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
582
583#ifdef CONFIG_PROVE_RCU
584/**
585 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
586 */
587void rcu_irq_exit_check_preempt(void)
588{
589 lockdep_assert_irqs_disabled();
590
591 RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
592 "RCU dynticks_nesting counter underflow/zero!");
593 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
594 DYNTICK_IRQ_NONIDLE,
595 "Bad RCU dynticks_nmi_nesting counter\n");
596 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
597 "RCU in extended quiescent state!");
598}
599#endif /* #ifdef CONFIG_PROVE_RCU */
600
601#ifdef CONFIG_NO_HZ_FULL
602/**
603 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
604 *
605 * The scheduler tick is not normally enabled when CPUs enter the kernel
606 * from nohz_full userspace execution. After all, nohz_full userspace
607 * execution is an RCU quiescent state and the time executing in the kernel
608 * is quite short. Except of course when it isn't. And it is not hard to
609 * cause a large system to spend tens of seconds or even minutes looping
610 * in the kernel, which can cause a number of problems, include RCU CPU
611 * stall warnings.
612 *
613 * Therefore, if a nohz_full CPU fails to report a quiescent state
614 * in a timely manner, the RCU grace-period kthread sets that CPU's
615 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
616 * exception will invoke this function, which will turn on the scheduler
617 * tick, which will enable RCU to detect that CPU's quiescent states,
618 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
619 * The tick will be disabled once a quiescent state is reported for
620 * this CPU.
621 *
622 * Of course, in carefully tuned systems, there might never be an
623 * interrupt or exception. In that case, the RCU grace-period kthread
624 * will eventually cause one to happen. However, in less carefully
625 * controlled environments, this function allows RCU to get what it
626 * needs without creating otherwise useless interruptions.
627 */
628void __rcu_irq_enter_check_tick(void)
629{
630 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
631
632 // If we're here from NMI there's nothing to do.
633 if (in_nmi())
634 return;
635
636 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
637 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
638
639 if (!tick_nohz_full_cpu(rdp->cpu) ||
640 !READ_ONCE(rdp->rcu_urgent_qs) ||
641 READ_ONCE(rdp->rcu_forced_tick)) {
642 // RCU doesn't need nohz_full help from this CPU, or it is
643 // already getting that help.
644 return;
645 }
646
647 // We get here only when not in an extended quiescent state and
648 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
649 // already watching and (2) The fact that we are in an interrupt
650 // handler and that the rcu_node lock is an irq-disabled lock
651 // prevents self-deadlock. So we can safely recheck under the lock.
652 // Note that the nohz_full state currently cannot change.
653 raw_spin_lock_rcu_node(rdp->mynode);
654 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
655 // A nohz_full CPU is in the kernel and RCU needs a
656 // quiescent state. Turn on the tick!
657 WRITE_ONCE(rdp->rcu_forced_tick, true);
658 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
659 }
660 raw_spin_unlock_rcu_node(rdp->mynode);
661}
662#endif /* CONFIG_NO_HZ_FULL */
663
664/*
665 * Check to see if any future non-offloaded RCU-related work will need
666 * to be done by the current CPU, even if none need be done immediately,
667 * returning 1 if so. This function is part of the RCU implementation;
668 * it is -not- an exported member of the RCU API. This is used by
669 * the idle-entry code to figure out whether it is safe to disable the
670 * scheduler-clock interrupt.
671 *
672 * Just check whether or not this CPU has non-offloaded RCU callbacks
673 * queued.
674 */
675int rcu_needs_cpu(void)
676{
677 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
678 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
679}
680
681/*
682 * If any sort of urgency was applied to the current CPU (for example,
683 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
684 * to get to a quiescent state, disable it.
685 */
686static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
687{
688 raw_lockdep_assert_held_rcu_node(rdp->mynode);
689 WRITE_ONCE(rdp->rcu_urgent_qs, false);
690 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
691 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
692 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
693 WRITE_ONCE(rdp->rcu_forced_tick, false);
694 }
695}
696
697/**
698 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
699 *
700 * Return true if RCU is watching the running CPU, which means that this
701 * CPU can safely enter RCU read-side critical sections. In other words,
702 * if the current CPU is not in its idle loop or is in an interrupt or
703 * NMI handler, return true.
704 *
705 * Make notrace because it can be called by the internal functions of
706 * ftrace, and making this notrace removes unnecessary recursion calls.
707 */
708notrace bool rcu_is_watching(void)
709{
710 bool ret;
711
712 preempt_disable_notrace();
713 ret = !rcu_dynticks_curr_cpu_in_eqs();
714 preempt_enable_notrace();
715 return ret;
716}
717EXPORT_SYMBOL_GPL(rcu_is_watching);
718
719/*
720 * If a holdout task is actually running, request an urgent quiescent
721 * state from its CPU. This is unsynchronized, so migrations can cause
722 * the request to go to the wrong CPU. Which is OK, all that will happen
723 * is that the CPU's next context switch will be a bit slower and next
724 * time around this task will generate another request.
725 */
726void rcu_request_urgent_qs_task(struct task_struct *t)
727{
728 int cpu;
729
730 barrier();
731 cpu = task_cpu(t);
732 if (!task_curr(t))
733 return; /* This task is not running on that CPU. */
734 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
735}
736
737#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
738
739/*
740 * Is the current CPU online as far as RCU is concerned?
741 *
742 * Disable preemption to avoid false positives that could otherwise
743 * happen due to the current CPU number being sampled, this task being
744 * preempted, its old CPU being taken offline, resuming on some other CPU,
745 * then determining that its old CPU is now offline.
746 *
747 * Disable checking if in an NMI handler because we cannot safely
748 * report errors from NMI handlers anyway. In addition, it is OK to use
749 * RCU on an offline processor during initial boot, hence the check for
750 * rcu_scheduler_fully_active.
751 */
752bool rcu_lockdep_current_cpu_online(void)
753{
754 struct rcu_data *rdp;
755 bool ret = false;
756
757 if (in_nmi() || !rcu_scheduler_fully_active)
758 return true;
759 preempt_disable_notrace();
760 rdp = this_cpu_ptr(&rcu_data);
761 /*
762 * Strictly, we care here about the case where the current CPU is
763 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
764 * not being up to date. So arch_spin_is_locked() might have a
765 * false positive if it's held by some *other* CPU, but that's
766 * OK because that just means a false *negative* on the warning.
767 */
768 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
769 ret = true;
770 preempt_enable_notrace();
771 return ret;
772}
773EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
774
775#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
776
777/*
778 * When trying to report a quiescent state on behalf of some other CPU,
779 * it is our responsibility to check for and handle potential overflow
780 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
781 * After all, the CPU might be in deep idle state, and thus executing no
782 * code whatsoever.
783 */
784static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
785{
786 raw_lockdep_assert_held_rcu_node(rnp);
787 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
788 rnp->gp_seq))
789 WRITE_ONCE(rdp->gpwrap, true);
790 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
791 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
792}
793
794/*
795 * Snapshot the specified CPU's dynticks counter so that we can later
796 * credit them with an implicit quiescent state. Return 1 if this CPU
797 * is in dynticks idle mode, which is an extended quiescent state.
798 */
799static int dyntick_save_progress_counter(struct rcu_data *rdp)
800{
801 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
802 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
803 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
804 rcu_gpnum_ovf(rdp->mynode, rdp);
805 return 1;
806 }
807 return 0;
808}
809
810/*
811 * Return true if the specified CPU has passed through a quiescent
812 * state by virtue of being in or having passed through an dynticks
813 * idle state since the last call to dyntick_save_progress_counter()
814 * for this same CPU, or by virtue of having been offline.
815 */
816static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
817{
818 unsigned long jtsq;
819 struct rcu_node *rnp = rdp->mynode;
820
821 /*
822 * If the CPU passed through or entered a dynticks idle phase with
823 * no active irq/NMI handlers, then we can safely pretend that the CPU
824 * already acknowledged the request to pass through a quiescent
825 * state. Either way, that CPU cannot possibly be in an RCU
826 * read-side critical section that started before the beginning
827 * of the current RCU grace period.
828 */
829 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
830 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
831 rcu_gpnum_ovf(rnp, rdp);
832 return 1;
833 }
834
835 /*
836 * Complain if a CPU that is considered to be offline from RCU's
837 * perspective has not yet reported a quiescent state. After all,
838 * the offline CPU should have reported a quiescent state during
839 * the CPU-offline process, or, failing that, by rcu_gp_init()
840 * if it ran concurrently with either the CPU going offline or the
841 * last task on a leaf rcu_node structure exiting its RCU read-side
842 * critical section while all CPUs corresponding to that structure
843 * are offline. This added warning detects bugs in any of these
844 * code paths.
845 *
846 * The rcu_node structure's ->lock is held here, which excludes
847 * the relevant portions the CPU-hotplug code, the grace-period
848 * initialization code, and the rcu_read_unlock() code paths.
849 *
850 * For more detail, please refer to the "Hotplug CPU" section
851 * of RCU's Requirements documentation.
852 */
853 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
854 struct rcu_node *rnp1;
855
856 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
857 __func__, rnp->grplo, rnp->grphi, rnp->level,
858 (long)rnp->gp_seq, (long)rnp->completedqs);
859 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
860 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
861 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
862 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
863 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
864 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
865 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
866 return 1; /* Break things loose after complaining. */
867 }
868
869 /*
870 * A CPU running for an extended time within the kernel can
871 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
872 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
873 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
874 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
875 * variable are safe because the assignments are repeated if this
876 * CPU failed to pass through a quiescent state. This code
877 * also checks .jiffies_resched in case jiffies_to_sched_qs
878 * is set way high.
879 */
880 jtsq = READ_ONCE(jiffies_to_sched_qs);
881 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
882 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
883 time_after(jiffies, rcu_state.jiffies_resched) ||
884 rcu_state.cbovld)) {
885 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
886 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
887 smp_store_release(&rdp->rcu_urgent_qs, true);
888 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
889 WRITE_ONCE(rdp->rcu_urgent_qs, true);
890 }
891
892 /*
893 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
894 * The above code handles this, but only for straight cond_resched().
895 * And some in-kernel loops check need_resched() before calling
896 * cond_resched(), which defeats the above code for CPUs that are
897 * running in-kernel with scheduling-clock interrupts disabled.
898 * So hit them over the head with the resched_cpu() hammer!
899 */
900 if (tick_nohz_full_cpu(rdp->cpu) &&
901 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
902 rcu_state.cbovld)) {
903 WRITE_ONCE(rdp->rcu_urgent_qs, true);
904 resched_cpu(rdp->cpu);
905 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
906 }
907
908 /*
909 * If more than halfway to RCU CPU stall-warning time, invoke
910 * resched_cpu() more frequently to try to loosen things up a bit.
911 * Also check to see if the CPU is getting hammered with interrupts,
912 * but only once per grace period, just to keep the IPIs down to
913 * a dull roar.
914 */
915 if (time_after(jiffies, rcu_state.jiffies_resched)) {
916 if (time_after(jiffies,
917 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
918 resched_cpu(rdp->cpu);
919 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
920 }
921 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
922 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
923 (rnp->ffmask & rdp->grpmask)) {
924 rdp->rcu_iw_pending = true;
925 rdp->rcu_iw_gp_seq = rnp->gp_seq;
926 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
927 }
928 }
929
930 return 0;
931}
932
933/* Trace-event wrapper function for trace_rcu_future_grace_period. */
934static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
935 unsigned long gp_seq_req, const char *s)
936{
937 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
938 gp_seq_req, rnp->level,
939 rnp->grplo, rnp->grphi, s);
940}
941
942/*
943 * rcu_start_this_gp - Request the start of a particular grace period
944 * @rnp_start: The leaf node of the CPU from which to start.
945 * @rdp: The rcu_data corresponding to the CPU from which to start.
946 * @gp_seq_req: The gp_seq of the grace period to start.
947 *
948 * Start the specified grace period, as needed to handle newly arrived
949 * callbacks. The required future grace periods are recorded in each
950 * rcu_node structure's ->gp_seq_needed field. Returns true if there
951 * is reason to awaken the grace-period kthread.
952 *
953 * The caller must hold the specified rcu_node structure's ->lock, which
954 * is why the caller is responsible for waking the grace-period kthread.
955 *
956 * Returns true if the GP thread needs to be awakened else false.
957 */
958static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
959 unsigned long gp_seq_req)
960{
961 bool ret = false;
962 struct rcu_node *rnp;
963
964 /*
965 * Use funnel locking to either acquire the root rcu_node
966 * structure's lock or bail out if the need for this grace period
967 * has already been recorded -- or if that grace period has in
968 * fact already started. If there is already a grace period in
969 * progress in a non-leaf node, no recording is needed because the
970 * end of the grace period will scan the leaf rcu_node structures.
971 * Note that rnp_start->lock must not be released.
972 */
973 raw_lockdep_assert_held_rcu_node(rnp_start);
974 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
975 for (rnp = rnp_start; 1; rnp = rnp->parent) {
976 if (rnp != rnp_start)
977 raw_spin_lock_rcu_node(rnp);
978 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
979 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
980 (rnp != rnp_start &&
981 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
982 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
983 TPS("Prestarted"));
984 goto unlock_out;
985 }
986 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
987 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
988 /*
989 * We just marked the leaf or internal node, and a
990 * grace period is in progress, which means that
991 * rcu_gp_cleanup() will see the marking. Bail to
992 * reduce contention.
993 */
994 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
995 TPS("Startedleaf"));
996 goto unlock_out;
997 }
998 if (rnp != rnp_start && rnp->parent != NULL)
999 raw_spin_unlock_rcu_node(rnp);
1000 if (!rnp->parent)
1001 break; /* At root, and perhaps also leaf. */
1002 }
1003
1004 /* If GP already in progress, just leave, otherwise start one. */
1005 if (rcu_gp_in_progress()) {
1006 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1007 goto unlock_out;
1008 }
1009 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1010 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1011 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1012 if (!READ_ONCE(rcu_state.gp_kthread)) {
1013 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1014 goto unlock_out;
1015 }
1016 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1017 ret = true; /* Caller must wake GP kthread. */
1018unlock_out:
1019 /* Push furthest requested GP to leaf node and rcu_data structure. */
1020 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1021 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1022 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1023 }
1024 if (rnp != rnp_start)
1025 raw_spin_unlock_rcu_node(rnp);
1026 return ret;
1027}
1028
1029/*
1030 * Clean up any old requests for the just-ended grace period. Also return
1031 * whether any additional grace periods have been requested.
1032 */
1033static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1034{
1035 bool needmore;
1036 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1037
1038 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1039 if (!needmore)
1040 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1041 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1042 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1043 return needmore;
1044}
1045
1046/*
1047 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1048 * interrupt or softirq handler, in which case we just might immediately
1049 * sleep upon return, resulting in a grace-period hang), and don't bother
1050 * awakening when there is nothing for the grace-period kthread to do
1051 * (as in several CPUs raced to awaken, we lost), and finally don't try
1052 * to awaken a kthread that has not yet been created. If all those checks
1053 * are passed, track some debug information and awaken.
1054 *
1055 * So why do the self-wakeup when in an interrupt or softirq handler
1056 * in the grace-period kthread's context? Because the kthread might have
1057 * been interrupted just as it was going to sleep, and just after the final
1058 * pre-sleep check of the awaken condition. In this case, a wakeup really
1059 * is required, and is therefore supplied.
1060 */
1061static void rcu_gp_kthread_wake(void)
1062{
1063 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1064
1065 if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1066 !READ_ONCE(rcu_state.gp_flags) || !t)
1067 return;
1068 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1069 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1070 swake_up_one(&rcu_state.gp_wq);
1071}
1072
1073/*
1074 * If there is room, assign a ->gp_seq number to any callbacks on this
1075 * CPU that have not already been assigned. Also accelerate any callbacks
1076 * that were previously assigned a ->gp_seq number that has since proven
1077 * to be too conservative, which can happen if callbacks get assigned a
1078 * ->gp_seq number while RCU is idle, but with reference to a non-root
1079 * rcu_node structure. This function is idempotent, so it does not hurt
1080 * to call it repeatedly. Returns an flag saying that we should awaken
1081 * the RCU grace-period kthread.
1082 *
1083 * The caller must hold rnp->lock with interrupts disabled.
1084 */
1085static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1086{
1087 unsigned long gp_seq_req;
1088 bool ret = false;
1089
1090 rcu_lockdep_assert_cblist_protected(rdp);
1091 raw_lockdep_assert_held_rcu_node(rnp);
1092
1093 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1094 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1095 return false;
1096
1097 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1098
1099 /*
1100 * Callbacks are often registered with incomplete grace-period
1101 * information. Something about the fact that getting exact
1102 * information requires acquiring a global lock... RCU therefore
1103 * makes a conservative estimate of the grace period number at which
1104 * a given callback will become ready to invoke. The following
1105 * code checks this estimate and improves it when possible, thus
1106 * accelerating callback invocation to an earlier grace-period
1107 * number.
1108 */
1109 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1110 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1111 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1112
1113 /* Trace depending on how much we were able to accelerate. */
1114 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1115 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1116 else
1117 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1118
1119 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1120
1121 return ret;
1122}
1123
1124/*
1125 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1126 * rcu_node structure's ->lock be held. It consults the cached value
1127 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1128 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1129 * while holding the leaf rcu_node structure's ->lock.
1130 */
1131static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1132 struct rcu_data *rdp)
1133{
1134 unsigned long c;
1135 bool needwake;
1136
1137 rcu_lockdep_assert_cblist_protected(rdp);
1138 c = rcu_seq_snap(&rcu_state.gp_seq);
1139 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1140 /* Old request still live, so mark recent callbacks. */
1141 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1142 return;
1143 }
1144 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1145 needwake = rcu_accelerate_cbs(rnp, rdp);
1146 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1147 if (needwake)
1148 rcu_gp_kthread_wake();
1149}
1150
1151/*
1152 * Move any callbacks whose grace period has completed to the
1153 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1154 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1155 * sublist. This function is idempotent, so it does not hurt to
1156 * invoke it repeatedly. As long as it is not invoked -too- often...
1157 * Returns true if the RCU grace-period kthread needs to be awakened.
1158 *
1159 * The caller must hold rnp->lock with interrupts disabled.
1160 */
1161static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1162{
1163 rcu_lockdep_assert_cblist_protected(rdp);
1164 raw_lockdep_assert_held_rcu_node(rnp);
1165
1166 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1167 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1168 return false;
1169
1170 /*
1171 * Find all callbacks whose ->gp_seq numbers indicate that they
1172 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1173 */
1174 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1175
1176 /* Classify any remaining callbacks. */
1177 return rcu_accelerate_cbs(rnp, rdp);
1178}
1179
1180/*
1181 * Move and classify callbacks, but only if doing so won't require
1182 * that the RCU grace-period kthread be awakened.
1183 */
1184static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1185 struct rcu_data *rdp)
1186{
1187 rcu_lockdep_assert_cblist_protected(rdp);
1188 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1189 return;
1190 // The grace period cannot end while we hold the rcu_node lock.
1191 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1192 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1193 raw_spin_unlock_rcu_node(rnp);
1194}
1195
1196/*
1197 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1198 * quiescent state. This is intended to be invoked when the CPU notices
1199 * a new grace period.
1200 */
1201static void rcu_strict_gp_check_qs(void)
1202{
1203 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1204 rcu_read_lock();
1205 rcu_read_unlock();
1206 }
1207}
1208
1209/*
1210 * Update CPU-local rcu_data state to record the beginnings and ends of
1211 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1212 * structure corresponding to the current CPU, and must have irqs disabled.
1213 * Returns true if the grace-period kthread needs to be awakened.
1214 */
1215static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1216{
1217 bool ret = false;
1218 bool need_qs;
1219 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1220
1221 raw_lockdep_assert_held_rcu_node(rnp);
1222
1223 if (rdp->gp_seq == rnp->gp_seq)
1224 return false; /* Nothing to do. */
1225
1226 /* Handle the ends of any preceding grace periods first. */
1227 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1228 unlikely(READ_ONCE(rdp->gpwrap))) {
1229 if (!offloaded)
1230 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1231 rdp->core_needs_qs = false;
1232 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1233 } else {
1234 if (!offloaded)
1235 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1236 if (rdp->core_needs_qs)
1237 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1238 }
1239
1240 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1241 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1242 unlikely(READ_ONCE(rdp->gpwrap))) {
1243 /*
1244 * If the current grace period is waiting for this CPU,
1245 * set up to detect a quiescent state, otherwise don't
1246 * go looking for one.
1247 */
1248 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1249 need_qs = !!(rnp->qsmask & rdp->grpmask);
1250 rdp->cpu_no_qs.b.norm = need_qs;
1251 rdp->core_needs_qs = need_qs;
1252 zero_cpu_stall_ticks(rdp);
1253 }
1254 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1255 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1256 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1257 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1258 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1259 WRITE_ONCE(rdp->gpwrap, false);
1260 rcu_gpnum_ovf(rnp, rdp);
1261 return ret;
1262}
1263
1264static void note_gp_changes(struct rcu_data *rdp)
1265{
1266 unsigned long flags;
1267 bool needwake;
1268 struct rcu_node *rnp;
1269
1270 local_irq_save(flags);
1271 rnp = rdp->mynode;
1272 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1273 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1274 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1275 local_irq_restore(flags);
1276 return;
1277 }
1278 needwake = __note_gp_changes(rnp, rdp);
1279 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1280 rcu_strict_gp_check_qs();
1281 if (needwake)
1282 rcu_gp_kthread_wake();
1283}
1284
1285static atomic_t *rcu_gp_slow_suppress;
1286
1287/* Register a counter to suppress debugging grace-period delays. */
1288void rcu_gp_slow_register(atomic_t *rgssp)
1289{
1290 WARN_ON_ONCE(rcu_gp_slow_suppress);
1291
1292 WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1293}
1294EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1295
1296/* Unregister a counter, with NULL for not caring which. */
1297void rcu_gp_slow_unregister(atomic_t *rgssp)
1298{
1299 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
1300
1301 WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1302}
1303EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1304
1305static bool rcu_gp_slow_is_suppressed(void)
1306{
1307 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1308
1309 return rgssp && atomic_read(rgssp);
1310}
1311
1312static void rcu_gp_slow(int delay)
1313{
1314 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1315 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1316 schedule_timeout_idle(delay);
1317}
1318
1319static unsigned long sleep_duration;
1320
1321/* Allow rcutorture to stall the grace-period kthread. */
1322void rcu_gp_set_torture_wait(int duration)
1323{
1324 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1325 WRITE_ONCE(sleep_duration, duration);
1326}
1327EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1328
1329/* Actually implement the aforementioned wait. */
1330static void rcu_gp_torture_wait(void)
1331{
1332 unsigned long duration;
1333
1334 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1335 return;
1336 duration = xchg(&sleep_duration, 0UL);
1337 if (duration > 0) {
1338 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1339 schedule_timeout_idle(duration);
1340 pr_alert("%s: Wait complete\n", __func__);
1341 }
1342}
1343
1344/*
1345 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1346 * processing.
1347 */
1348static void rcu_strict_gp_boundary(void *unused)
1349{
1350 invoke_rcu_core();
1351}
1352
1353// Has rcu_init() been invoked? This is used (for example) to determine
1354// whether spinlocks may be acquired safely.
1355static bool rcu_init_invoked(void)
1356{
1357 return !!rcu_state.n_online_cpus;
1358}
1359
1360// Make the polled API aware of the beginning of a grace period.
1361static void rcu_poll_gp_seq_start(unsigned long *snap)
1362{
1363 struct rcu_node *rnp = rcu_get_root();
1364
1365 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1366 raw_lockdep_assert_held_rcu_node(rnp);
1367
1368 // If RCU was idle, note beginning of GP.
1369 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1370 rcu_seq_start(&rcu_state.gp_seq_polled);
1371
1372 // Either way, record current state.
1373 *snap = rcu_state.gp_seq_polled;
1374}
1375
1376// Make the polled API aware of the end of a grace period.
1377static void rcu_poll_gp_seq_end(unsigned long *snap)
1378{
1379 struct rcu_node *rnp = rcu_get_root();
1380
1381 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1382 raw_lockdep_assert_held_rcu_node(rnp);
1383
1384 // If the previously noted GP is still in effect, record the
1385 // end of that GP. Either way, zero counter to avoid counter-wrap
1386 // problems.
1387 if (*snap && *snap == rcu_state.gp_seq_polled) {
1388 rcu_seq_end(&rcu_state.gp_seq_polled);
1389 rcu_state.gp_seq_polled_snap = 0;
1390 rcu_state.gp_seq_polled_exp_snap = 0;
1391 } else {
1392 *snap = 0;
1393 }
1394}
1395
1396// Make the polled API aware of the beginning of a grace period, but
1397// where caller does not hold the root rcu_node structure's lock.
1398static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1399{
1400 unsigned long flags;
1401 struct rcu_node *rnp = rcu_get_root();
1402
1403 if (rcu_init_invoked()) {
1404 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1405 lockdep_assert_irqs_enabled();
1406 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1407 }
1408 rcu_poll_gp_seq_start(snap);
1409 if (rcu_init_invoked())
1410 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1411}
1412
1413// Make the polled API aware of the end of a grace period, but where
1414// caller does not hold the root rcu_node structure's lock.
1415static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1416{
1417 unsigned long flags;
1418 struct rcu_node *rnp = rcu_get_root();
1419
1420 if (rcu_init_invoked()) {
1421 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1422 lockdep_assert_irqs_enabled();
1423 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1424 }
1425 rcu_poll_gp_seq_end(snap);
1426 if (rcu_init_invoked())
1427 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1428}
1429
1430/*
1431 * Initialize a new grace period. Return false if no grace period required.
1432 */
1433static noinline_for_stack bool rcu_gp_init(void)
1434{
1435 unsigned long flags;
1436 unsigned long oldmask;
1437 unsigned long mask;
1438 struct rcu_data *rdp;
1439 struct rcu_node *rnp = rcu_get_root();
1440
1441 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1442 raw_spin_lock_irq_rcu_node(rnp);
1443 if (!READ_ONCE(rcu_state.gp_flags)) {
1444 /* Spurious wakeup, tell caller to go back to sleep. */
1445 raw_spin_unlock_irq_rcu_node(rnp);
1446 return false;
1447 }
1448 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1449
1450 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1451 /*
1452 * Grace period already in progress, don't start another.
1453 * Not supposed to be able to happen.
1454 */
1455 raw_spin_unlock_irq_rcu_node(rnp);
1456 return false;
1457 }
1458
1459 /* Advance to a new grace period and initialize state. */
1460 record_gp_stall_check_time();
1461 /* Record GP times before starting GP, hence rcu_seq_start(). */
1462 rcu_seq_start(&rcu_state.gp_seq);
1463 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1464 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1465 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1466 raw_spin_unlock_irq_rcu_node(rnp);
1467
1468 /*
1469 * Apply per-leaf buffered online and offline operations to
1470 * the rcu_node tree. Note that this new grace period need not
1471 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1472 * offlining path, when combined with checks in this function,
1473 * will handle CPUs that are currently going offline or that will
1474 * go offline later. Please also refer to "Hotplug CPU" section
1475 * of RCU's Requirements documentation.
1476 */
1477 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1478 /* Exclude CPU hotplug operations. */
1479 rcu_for_each_leaf_node(rnp) {
1480 local_irq_save(flags);
1481 arch_spin_lock(&rcu_state.ofl_lock);
1482 raw_spin_lock_rcu_node(rnp);
1483 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1484 !rnp->wait_blkd_tasks) {
1485 /* Nothing to do on this leaf rcu_node structure. */
1486 raw_spin_unlock_rcu_node(rnp);
1487 arch_spin_unlock(&rcu_state.ofl_lock);
1488 local_irq_restore(flags);
1489 continue;
1490 }
1491
1492 /* Record old state, apply changes to ->qsmaskinit field. */
1493 oldmask = rnp->qsmaskinit;
1494 rnp->qsmaskinit = rnp->qsmaskinitnext;
1495
1496 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1497 if (!oldmask != !rnp->qsmaskinit) {
1498 if (!oldmask) { /* First online CPU for rcu_node. */
1499 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1500 rcu_init_new_rnp(rnp);
1501 } else if (rcu_preempt_has_tasks(rnp)) {
1502 rnp->wait_blkd_tasks = true; /* blocked tasks */
1503 } else { /* Last offline CPU and can propagate. */
1504 rcu_cleanup_dead_rnp(rnp);
1505 }
1506 }
1507
1508 /*
1509 * If all waited-on tasks from prior grace period are
1510 * done, and if all this rcu_node structure's CPUs are
1511 * still offline, propagate up the rcu_node tree and
1512 * clear ->wait_blkd_tasks. Otherwise, if one of this
1513 * rcu_node structure's CPUs has since come back online,
1514 * simply clear ->wait_blkd_tasks.
1515 */
1516 if (rnp->wait_blkd_tasks &&
1517 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1518 rnp->wait_blkd_tasks = false;
1519 if (!rnp->qsmaskinit)
1520 rcu_cleanup_dead_rnp(rnp);
1521 }
1522
1523 raw_spin_unlock_rcu_node(rnp);
1524 arch_spin_unlock(&rcu_state.ofl_lock);
1525 local_irq_restore(flags);
1526 }
1527 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1528
1529 /*
1530 * Set the quiescent-state-needed bits in all the rcu_node
1531 * structures for all currently online CPUs in breadth-first
1532 * order, starting from the root rcu_node structure, relying on the
1533 * layout of the tree within the rcu_state.node[] array. Note that
1534 * other CPUs will access only the leaves of the hierarchy, thus
1535 * seeing that no grace period is in progress, at least until the
1536 * corresponding leaf node has been initialized.
1537 *
1538 * The grace period cannot complete until the initialization
1539 * process finishes, because this kthread handles both.
1540 */
1541 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1542 rcu_for_each_node_breadth_first(rnp) {
1543 rcu_gp_slow(gp_init_delay);
1544 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1545 rdp = this_cpu_ptr(&rcu_data);
1546 rcu_preempt_check_blocked_tasks(rnp);
1547 rnp->qsmask = rnp->qsmaskinit;
1548 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1549 if (rnp == rdp->mynode)
1550 (void)__note_gp_changes(rnp, rdp);
1551 rcu_preempt_boost_start_gp(rnp);
1552 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1553 rnp->level, rnp->grplo,
1554 rnp->grphi, rnp->qsmask);
1555 /* Quiescent states for tasks on any now-offline CPUs. */
1556 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1557 rnp->rcu_gp_init_mask = mask;
1558 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1559 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1560 else
1561 raw_spin_unlock_irq_rcu_node(rnp);
1562 cond_resched_tasks_rcu_qs();
1563 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1564 }
1565
1566 // If strict, make all CPUs aware of new grace period.
1567 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1568 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1569
1570 return true;
1571}
1572
1573/*
1574 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1575 * time.
1576 */
1577static bool rcu_gp_fqs_check_wake(int *gfp)
1578{
1579 struct rcu_node *rnp = rcu_get_root();
1580
1581 // If under overload conditions, force an immediate FQS scan.
1582 if (*gfp & RCU_GP_FLAG_OVLD)
1583 return true;
1584
1585 // Someone like call_rcu() requested a force-quiescent-state scan.
1586 *gfp = READ_ONCE(rcu_state.gp_flags);
1587 if (*gfp & RCU_GP_FLAG_FQS)
1588 return true;
1589
1590 // The current grace period has completed.
1591 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1592 return true;
1593
1594 return false;
1595}
1596
1597/*
1598 * Do one round of quiescent-state forcing.
1599 */
1600static void rcu_gp_fqs(bool first_time)
1601{
1602 struct rcu_node *rnp = rcu_get_root();
1603
1604 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1605 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1606 if (first_time) {
1607 /* Collect dyntick-idle snapshots. */
1608 force_qs_rnp(dyntick_save_progress_counter);
1609 } else {
1610 /* Handle dyntick-idle and offline CPUs. */
1611 force_qs_rnp(rcu_implicit_dynticks_qs);
1612 }
1613 /* Clear flag to prevent immediate re-entry. */
1614 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1615 raw_spin_lock_irq_rcu_node(rnp);
1616 WRITE_ONCE(rcu_state.gp_flags,
1617 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1618 raw_spin_unlock_irq_rcu_node(rnp);
1619 }
1620}
1621
1622/*
1623 * Loop doing repeated quiescent-state forcing until the grace period ends.
1624 */
1625static noinline_for_stack void rcu_gp_fqs_loop(void)
1626{
1627 bool first_gp_fqs = true;
1628 int gf = 0;
1629 unsigned long j;
1630 int ret;
1631 struct rcu_node *rnp = rcu_get_root();
1632
1633 j = READ_ONCE(jiffies_till_first_fqs);
1634 if (rcu_state.cbovld)
1635 gf = RCU_GP_FLAG_OVLD;
1636 ret = 0;
1637 for (;;) {
1638 if (rcu_state.cbovld) {
1639 j = (j + 2) / 3;
1640 if (j <= 0)
1641 j = 1;
1642 }
1643 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1644 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1645 /*
1646 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1647 * update; required for stall checks.
1648 */
1649 smp_wmb();
1650 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1651 jiffies + (j ? 3 * j : 2));
1652 }
1653 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1654 TPS("fqswait"));
1655 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1656 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1657 rcu_gp_fqs_check_wake(&gf), j);
1658 rcu_gp_torture_wait();
1659 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1660 /* Locking provides needed memory barriers. */
1661 /*
1662 * Exit the loop if the root rcu_node structure indicates that the grace period
1663 * has ended, leave the loop. The rcu_preempt_blocked_readers_cgp(rnp) check
1664 * is required only for single-node rcu_node trees because readers blocking
1665 * the current grace period are queued only on leaf rcu_node structures.
1666 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1667 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1668 * the corresponding leaf nodes have passed through their quiescent state.
1669 */
1670 if (!READ_ONCE(rnp->qsmask) &&
1671 !rcu_preempt_blocked_readers_cgp(rnp))
1672 break;
1673 /* If time for quiescent-state forcing, do it. */
1674 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1675 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1676 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1677 TPS("fqsstart"));
1678 rcu_gp_fqs(first_gp_fqs);
1679 gf = 0;
1680 if (first_gp_fqs) {
1681 first_gp_fqs = false;
1682 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1683 }
1684 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1685 TPS("fqsend"));
1686 cond_resched_tasks_rcu_qs();
1687 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1688 ret = 0; /* Force full wait till next FQS. */
1689 j = READ_ONCE(jiffies_till_next_fqs);
1690 } else {
1691 /* Deal with stray signal. */
1692 cond_resched_tasks_rcu_qs();
1693 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1694 WARN_ON(signal_pending(current));
1695 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1696 TPS("fqswaitsig"));
1697 ret = 1; /* Keep old FQS timing. */
1698 j = jiffies;
1699 if (time_after(jiffies, rcu_state.jiffies_force_qs))
1700 j = 1;
1701 else
1702 j = rcu_state.jiffies_force_qs - j;
1703 gf = 0;
1704 }
1705 }
1706}
1707
1708/*
1709 * Clean up after the old grace period.
1710 */
1711static noinline void rcu_gp_cleanup(void)
1712{
1713 int cpu;
1714 bool needgp = false;
1715 unsigned long gp_duration;
1716 unsigned long new_gp_seq;
1717 bool offloaded;
1718 struct rcu_data *rdp;
1719 struct rcu_node *rnp = rcu_get_root();
1720 struct swait_queue_head *sq;
1721
1722 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1723 raw_spin_lock_irq_rcu_node(rnp);
1724 rcu_state.gp_end = jiffies;
1725 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1726 if (gp_duration > rcu_state.gp_max)
1727 rcu_state.gp_max = gp_duration;
1728
1729 /*
1730 * We know the grace period is complete, but to everyone else
1731 * it appears to still be ongoing. But it is also the case
1732 * that to everyone else it looks like there is nothing that
1733 * they can do to advance the grace period. It is therefore
1734 * safe for us to drop the lock in order to mark the grace
1735 * period as completed in all of the rcu_node structures.
1736 */
1737 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1738 raw_spin_unlock_irq_rcu_node(rnp);
1739
1740 /*
1741 * Propagate new ->gp_seq value to rcu_node structures so that
1742 * other CPUs don't have to wait until the start of the next grace
1743 * period to process their callbacks. This also avoids some nasty
1744 * RCU grace-period initialization races by forcing the end of
1745 * the current grace period to be completely recorded in all of
1746 * the rcu_node structures before the beginning of the next grace
1747 * period is recorded in any of the rcu_node structures.
1748 */
1749 new_gp_seq = rcu_state.gp_seq;
1750 rcu_seq_end(&new_gp_seq);
1751 rcu_for_each_node_breadth_first(rnp) {
1752 raw_spin_lock_irq_rcu_node(rnp);
1753 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1754 dump_blkd_tasks(rnp, 10);
1755 WARN_ON_ONCE(rnp->qsmask);
1756 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1757 if (!rnp->parent)
1758 smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1759 rdp = this_cpu_ptr(&rcu_data);
1760 if (rnp == rdp->mynode)
1761 needgp = __note_gp_changes(rnp, rdp) || needgp;
1762 /* smp_mb() provided by prior unlock-lock pair. */
1763 needgp = rcu_future_gp_cleanup(rnp) || needgp;
1764 // Reset overload indication for CPUs no longer overloaded
1765 if (rcu_is_leaf_node(rnp))
1766 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1767 rdp = per_cpu_ptr(&rcu_data, cpu);
1768 check_cb_ovld_locked(rdp, rnp);
1769 }
1770 sq = rcu_nocb_gp_get(rnp);
1771 raw_spin_unlock_irq_rcu_node(rnp);
1772 rcu_nocb_gp_cleanup(sq);
1773 cond_resched_tasks_rcu_qs();
1774 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1775 rcu_gp_slow(gp_cleanup_delay);
1776 }
1777 rnp = rcu_get_root();
1778 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1779
1780 /* Declare grace period done, trace first to use old GP number. */
1781 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1782 rcu_seq_end(&rcu_state.gp_seq);
1783 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1784 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1785 /* Check for GP requests since above loop. */
1786 rdp = this_cpu_ptr(&rcu_data);
1787 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1788 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1789 TPS("CleanupMore"));
1790 needgp = true;
1791 }
1792 /* Advance CBs to reduce false positives below. */
1793 offloaded = rcu_rdp_is_offloaded(rdp);
1794 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1795
1796 // We get here if a grace period was needed (“needgp”)
1797 // and the above call to rcu_accelerate_cbs() did not set
1798 // the RCU_GP_FLAG_INIT bit in ->gp_state (which records
1799 // the need for another grace period). The purpose
1800 // of the “offloaded” check is to avoid invoking
1801 // rcu_accelerate_cbs() on an offloaded CPU because we do not
1802 // hold the ->nocb_lock needed to safely access an offloaded
1803 // ->cblist. We do not want to acquire that lock because
1804 // it can be heavily contended during callback floods.
1805
1806 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1807 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1808 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1809 } else {
1810
1811 // We get here either if there is no need for an
1812 // additional grace period or if rcu_accelerate_cbs() has
1813 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
1814 // So all we need to do is to clear all of the other
1815 // ->gp_flags bits.
1816
1817 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1818 }
1819 raw_spin_unlock_irq_rcu_node(rnp);
1820
1821 // If strict, make all CPUs aware of the end of the old grace period.
1822 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1823 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1824}
1825
1826/*
1827 * Body of kthread that handles grace periods.
1828 */
1829static int __noreturn rcu_gp_kthread(void *unused)
1830{
1831 rcu_bind_gp_kthread();
1832 for (;;) {
1833
1834 /* Handle grace-period start. */
1835 for (;;) {
1836 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1837 TPS("reqwait"));
1838 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1839 swait_event_idle_exclusive(rcu_state.gp_wq,
1840 READ_ONCE(rcu_state.gp_flags) &
1841 RCU_GP_FLAG_INIT);
1842 rcu_gp_torture_wait();
1843 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1844 /* Locking provides needed memory barrier. */
1845 if (rcu_gp_init())
1846 break;
1847 cond_resched_tasks_rcu_qs();
1848 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1849 WARN_ON(signal_pending(current));
1850 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1851 TPS("reqwaitsig"));
1852 }
1853
1854 /* Handle quiescent-state forcing. */
1855 rcu_gp_fqs_loop();
1856
1857 /* Handle grace-period end. */
1858 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1859 rcu_gp_cleanup();
1860 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1861 }
1862}
1863
1864/*
1865 * Report a full set of quiescent states to the rcu_state data structure.
1866 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1867 * another grace period is required. Whether we wake the grace-period
1868 * kthread or it awakens itself for the next round of quiescent-state
1869 * forcing, that kthread will clean up after the just-completed grace
1870 * period. Note that the caller must hold rnp->lock, which is released
1871 * before return.
1872 */
1873static void rcu_report_qs_rsp(unsigned long flags)
1874 __releases(rcu_get_root()->lock)
1875{
1876 raw_lockdep_assert_held_rcu_node(rcu_get_root());
1877 WARN_ON_ONCE(!rcu_gp_in_progress());
1878 WRITE_ONCE(rcu_state.gp_flags,
1879 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1880 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1881 rcu_gp_kthread_wake();
1882}
1883
1884/*
1885 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1886 * Allows quiescent states for a group of CPUs to be reported at one go
1887 * to the specified rcu_node structure, though all the CPUs in the group
1888 * must be represented by the same rcu_node structure (which need not be a
1889 * leaf rcu_node structure, though it often will be). The gps parameter
1890 * is the grace-period snapshot, which means that the quiescent states
1891 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
1892 * must be held upon entry, and it is released before return.
1893 *
1894 * As a special case, if mask is zero, the bit-already-cleared check is
1895 * disabled. This allows propagating quiescent state due to resumed tasks
1896 * during grace-period initialization.
1897 */
1898static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1899 unsigned long gps, unsigned long flags)
1900 __releases(rnp->lock)
1901{
1902 unsigned long oldmask = 0;
1903 struct rcu_node *rnp_c;
1904
1905 raw_lockdep_assert_held_rcu_node(rnp);
1906
1907 /* Walk up the rcu_node hierarchy. */
1908 for (;;) {
1909 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1910
1911 /*
1912 * Our bit has already been cleared, or the
1913 * relevant grace period is already over, so done.
1914 */
1915 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1916 return;
1917 }
1918 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1919 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1920 rcu_preempt_blocked_readers_cgp(rnp));
1921 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1922 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1923 mask, rnp->qsmask, rnp->level,
1924 rnp->grplo, rnp->grphi,
1925 !!rnp->gp_tasks);
1926 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1927
1928 /* Other bits still set at this level, so done. */
1929 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1930 return;
1931 }
1932 rnp->completedqs = rnp->gp_seq;
1933 mask = rnp->grpmask;
1934 if (rnp->parent == NULL) {
1935
1936 /* No more levels. Exit loop holding root lock. */
1937
1938 break;
1939 }
1940 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1941 rnp_c = rnp;
1942 rnp = rnp->parent;
1943 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1944 oldmask = READ_ONCE(rnp_c->qsmask);
1945 }
1946
1947 /*
1948 * Get here if we are the last CPU to pass through a quiescent
1949 * state for this grace period. Invoke rcu_report_qs_rsp()
1950 * to clean up and start the next grace period if one is needed.
1951 */
1952 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1953}
1954
1955/*
1956 * Record a quiescent state for all tasks that were previously queued
1957 * on the specified rcu_node structure and that were blocking the current
1958 * RCU grace period. The caller must hold the corresponding rnp->lock with
1959 * irqs disabled, and this lock is released upon return, but irqs remain
1960 * disabled.
1961 */
1962static void __maybe_unused
1963rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1964 __releases(rnp->lock)
1965{
1966 unsigned long gps;
1967 unsigned long mask;
1968 struct rcu_node *rnp_p;
1969
1970 raw_lockdep_assert_held_rcu_node(rnp);
1971 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1972 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1973 rnp->qsmask != 0) {
1974 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1975 return; /* Still need more quiescent states! */
1976 }
1977
1978 rnp->completedqs = rnp->gp_seq;
1979 rnp_p = rnp->parent;
1980 if (rnp_p == NULL) {
1981 /*
1982 * Only one rcu_node structure in the tree, so don't
1983 * try to report up to its nonexistent parent!
1984 */
1985 rcu_report_qs_rsp(flags);
1986 return;
1987 }
1988
1989 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1990 gps = rnp->gp_seq;
1991 mask = rnp->grpmask;
1992 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1993 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
1994 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1995}
1996
1997/*
1998 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1999 * structure. This must be called from the specified CPU.
2000 */
2001static void
2002rcu_report_qs_rdp(struct rcu_data *rdp)
2003{
2004 unsigned long flags;
2005 unsigned long mask;
2006 bool needwake = false;
2007 bool needacc = false;
2008 struct rcu_node *rnp;
2009
2010 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2011 rnp = rdp->mynode;
2012 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2013 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2014 rdp->gpwrap) {
2015
2016 /*
2017 * The grace period in which this quiescent state was
2018 * recorded has ended, so don't report it upwards.
2019 * We will instead need a new quiescent state that lies
2020 * within the current grace period.
2021 */
2022 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2023 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2024 return;
2025 }
2026 mask = rdp->grpmask;
2027 rdp->core_needs_qs = false;
2028 if ((rnp->qsmask & mask) == 0) {
2029 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2030 } else {
2031 /*
2032 * This GP can't end until cpu checks in, so all of our
2033 * callbacks can be processed during the next GP.
2034 *
2035 * NOCB kthreads have their own way to deal with that...
2036 */
2037 if (!rcu_rdp_is_offloaded(rdp)) {
2038 needwake = rcu_accelerate_cbs(rnp, rdp);
2039 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2040 /*
2041 * ...but NOCB kthreads may miss or delay callbacks acceleration
2042 * if in the middle of a (de-)offloading process.
2043 */
2044 needacc = true;
2045 }
2046
2047 rcu_disable_urgency_upon_qs(rdp);
2048 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2049 /* ^^^ Released rnp->lock */
2050 if (needwake)
2051 rcu_gp_kthread_wake();
2052
2053 if (needacc) {
2054 rcu_nocb_lock_irqsave(rdp, flags);
2055 rcu_accelerate_cbs_unlocked(rnp, rdp);
2056 rcu_nocb_unlock_irqrestore(rdp, flags);
2057 }
2058 }
2059}
2060
2061/*
2062 * Check to see if there is a new grace period of which this CPU
2063 * is not yet aware, and if so, set up local rcu_data state for it.
2064 * Otherwise, see if this CPU has just passed through its first
2065 * quiescent state for this grace period, and record that fact if so.
2066 */
2067static void
2068rcu_check_quiescent_state(struct rcu_data *rdp)
2069{
2070 /* Check for grace-period ends and beginnings. */
2071 note_gp_changes(rdp);
2072
2073 /*
2074 * Does this CPU still need to do its part for current grace period?
2075 * If no, return and let the other CPUs do their part as well.
2076 */
2077 if (!rdp->core_needs_qs)
2078 return;
2079
2080 /*
2081 * Was there a quiescent state since the beginning of the grace
2082 * period? If no, then exit and wait for the next call.
2083 */
2084 if (rdp->cpu_no_qs.b.norm)
2085 return;
2086
2087 /*
2088 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2089 * judge of that).
2090 */
2091 rcu_report_qs_rdp(rdp);
2092}
2093
2094/*
2095 * Near the end of the offline process. Trace the fact that this CPU
2096 * is going offline.
2097 */
2098int rcutree_dying_cpu(unsigned int cpu)
2099{
2100 bool blkd;
2101 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2102 struct rcu_node *rnp = rdp->mynode;
2103
2104 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2105 return 0;
2106
2107 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
2108 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2109 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2110 return 0;
2111}
2112
2113/*
2114 * All CPUs for the specified rcu_node structure have gone offline,
2115 * and all tasks that were preempted within an RCU read-side critical
2116 * section while running on one of those CPUs have since exited their RCU
2117 * read-side critical section. Some other CPU is reporting this fact with
2118 * the specified rcu_node structure's ->lock held and interrupts disabled.
2119 * This function therefore goes up the tree of rcu_node structures,
2120 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2121 * the leaf rcu_node structure's ->qsmaskinit field has already been
2122 * updated.
2123 *
2124 * This function does check that the specified rcu_node structure has
2125 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2126 * prematurely. That said, invoking it after the fact will cost you
2127 * a needless lock acquisition. So once it has done its work, don't
2128 * invoke it again.
2129 */
2130static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2131{
2132 long mask;
2133 struct rcu_node *rnp = rnp_leaf;
2134
2135 raw_lockdep_assert_held_rcu_node(rnp_leaf);
2136 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2137 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2138 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2139 return;
2140 for (;;) {
2141 mask = rnp->grpmask;
2142 rnp = rnp->parent;
2143 if (!rnp)
2144 break;
2145 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2146 rnp->qsmaskinit &= ~mask;
2147 /* Between grace periods, so better already be zero! */
2148 WARN_ON_ONCE(rnp->qsmask);
2149 if (rnp->qsmaskinit) {
2150 raw_spin_unlock_rcu_node(rnp);
2151 /* irqs remain disabled. */
2152 return;
2153 }
2154 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2155 }
2156}
2157
2158/*
2159 * The CPU has been completely removed, and some other CPU is reporting
2160 * this fact from process context. Do the remainder of the cleanup.
2161 * There can only be one CPU hotplug operation at a time, so no need for
2162 * explicit locking.
2163 */
2164int rcutree_dead_cpu(unsigned int cpu)
2165{
2166 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2167 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2168
2169 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2170 return 0;
2171
2172 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2173 /* Adjust any no-longer-needed kthreads. */
2174 rcu_boost_kthread_setaffinity(rnp, -1);
2175 // Stop-machine done, so allow nohz_full to disable tick.
2176 tick_dep_clear(TICK_DEP_BIT_RCU);
2177 return 0;
2178}
2179
2180/*
2181 * Invoke any RCU callbacks that have made it to the end of their grace
2182 * period. Throttle as specified by rdp->blimit.
2183 */
2184static void rcu_do_batch(struct rcu_data *rdp)
2185{
2186 int div;
2187 bool __maybe_unused empty;
2188 unsigned long flags;
2189 struct rcu_head *rhp;
2190 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2191 long bl, count = 0;
2192 long pending, tlimit = 0;
2193
2194 /* If no callbacks are ready, just return. */
2195 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2196 trace_rcu_batch_start(rcu_state.name,
2197 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2198 trace_rcu_batch_end(rcu_state.name, 0,
2199 !rcu_segcblist_empty(&rdp->cblist),
2200 need_resched(), is_idle_task(current),
2201 rcu_is_callbacks_kthread(rdp));
2202 return;
2203 }
2204
2205 /*
2206 * Extract the list of ready callbacks, disabling IRQs to prevent
2207 * races with call_rcu() from interrupt handlers. Leave the
2208 * callback counts, as rcu_barrier() needs to be conservative.
2209 */
2210 rcu_nocb_lock_irqsave(rdp, flags);
2211 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2212 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2213 div = READ_ONCE(rcu_divisor);
2214 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2215 bl = max(rdp->blimit, pending >> div);
2216 if (in_serving_softirq() && unlikely(bl > 100)) {
2217 long rrn = READ_ONCE(rcu_resched_ns);
2218
2219 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2220 tlimit = local_clock() + rrn;
2221 }
2222 trace_rcu_batch_start(rcu_state.name,
2223 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2224 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2225 if (rcu_rdp_is_offloaded(rdp))
2226 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2227
2228 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2229 rcu_nocb_unlock_irqrestore(rdp, flags);
2230
2231 /* Invoke callbacks. */
2232 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2233 rhp = rcu_cblist_dequeue(&rcl);
2234
2235 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2236 rcu_callback_t f;
2237
2238 count++;
2239 debug_rcu_head_unqueue(rhp);
2240
2241 rcu_lock_acquire(&rcu_callback_map);
2242 trace_rcu_invoke_callback(rcu_state.name, rhp);
2243
2244 f = rhp->func;
2245 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2246 f(rhp);
2247
2248 rcu_lock_release(&rcu_callback_map);
2249
2250 /*
2251 * Stop only if limit reached and CPU has something to do.
2252 */
2253 if (in_serving_softirq()) {
2254 if (count >= bl && (need_resched() || !is_idle_task(current)))
2255 break;
2256 /*
2257 * Make sure we don't spend too much time here and deprive other
2258 * softirq vectors of CPU cycles.
2259 */
2260 if (unlikely(tlimit)) {
2261 /* only call local_clock() every 32 callbacks */
2262 if (likely((count & 31) || local_clock() < tlimit))
2263 continue;
2264 /* Exceeded the time limit, so leave. */
2265 break;
2266 }
2267 } else {
2268 local_bh_enable();
2269 lockdep_assert_irqs_enabled();
2270 cond_resched_tasks_rcu_qs();
2271 lockdep_assert_irqs_enabled();
2272 local_bh_disable();
2273 }
2274 }
2275
2276 rcu_nocb_lock_irqsave(rdp, flags);
2277 rdp->n_cbs_invoked += count;
2278 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2279 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2280
2281 /* Update counts and requeue any remaining callbacks. */
2282 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2283 rcu_segcblist_add_len(&rdp->cblist, -count);
2284
2285 /* Reinstate batch limit if we have worked down the excess. */
2286 count = rcu_segcblist_n_cbs(&rdp->cblist);
2287 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2288 rdp->blimit = blimit;
2289
2290 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2291 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2292 rdp->qlen_last_fqs_check = 0;
2293 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2294 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2295 rdp->qlen_last_fqs_check = count;
2296
2297 /*
2298 * The following usually indicates a double call_rcu(). To track
2299 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2300 */
2301 empty = rcu_segcblist_empty(&rdp->cblist);
2302 WARN_ON_ONCE(count == 0 && !empty);
2303 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2304 count != 0 && empty);
2305 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2306 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2307
2308 rcu_nocb_unlock_irqrestore(rdp, flags);
2309
2310 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2311}
2312
2313/*
2314 * This function is invoked from each scheduling-clock interrupt,
2315 * and checks to see if this CPU is in a non-context-switch quiescent
2316 * state, for example, user mode or idle loop. It also schedules RCU
2317 * core processing. If the current grace period has gone on too long,
2318 * it will ask the scheduler to manufacture a context switch for the sole
2319 * purpose of providing the needed quiescent state.
2320 */
2321void rcu_sched_clock_irq(int user)
2322{
2323 unsigned long j;
2324
2325 if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2326 j = jiffies;
2327 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2328 __this_cpu_write(rcu_data.last_sched_clock, j);
2329 }
2330 trace_rcu_utilization(TPS("Start scheduler-tick"));
2331 lockdep_assert_irqs_disabled();
2332 raw_cpu_inc(rcu_data.ticks_this_gp);
2333 /* The load-acquire pairs with the store-release setting to true. */
2334 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2335 /* Idle and userspace execution already are quiescent states. */
2336 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2337 set_tsk_need_resched(current);
2338 set_preempt_need_resched();
2339 }
2340 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2341 }
2342 rcu_flavor_sched_clock_irq(user);
2343 if (rcu_pending(user))
2344 invoke_rcu_core();
2345 if (user || rcu_is_cpu_rrupt_from_idle())
2346 rcu_note_voluntary_context_switch(current);
2347 lockdep_assert_irqs_disabled();
2348
2349 trace_rcu_utilization(TPS("End scheduler-tick"));
2350}
2351
2352/*
2353 * Scan the leaf rcu_node structures. For each structure on which all
2354 * CPUs have reported a quiescent state and on which there are tasks
2355 * blocking the current grace period, initiate RCU priority boosting.
2356 * Otherwise, invoke the specified function to check dyntick state for
2357 * each CPU that has not yet reported a quiescent state.
2358 */
2359static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2360{
2361 int cpu;
2362 unsigned long flags;
2363 unsigned long mask;
2364 struct rcu_data *rdp;
2365 struct rcu_node *rnp;
2366
2367 rcu_state.cbovld = rcu_state.cbovldnext;
2368 rcu_state.cbovldnext = false;
2369 rcu_for_each_leaf_node(rnp) {
2370 cond_resched_tasks_rcu_qs();
2371 mask = 0;
2372 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2373 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2374 if (rnp->qsmask == 0) {
2375 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2376 /*
2377 * No point in scanning bits because they
2378 * are all zero. But we might need to
2379 * priority-boost blocked readers.
2380 */
2381 rcu_initiate_boost(rnp, flags);
2382 /* rcu_initiate_boost() releases rnp->lock */
2383 continue;
2384 }
2385 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2386 continue;
2387 }
2388 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2389 rdp = per_cpu_ptr(&rcu_data, cpu);
2390 if (f(rdp)) {
2391 mask |= rdp->grpmask;
2392 rcu_disable_urgency_upon_qs(rdp);
2393 }
2394 }
2395 if (mask != 0) {
2396 /* Idle/offline CPUs, report (releases rnp->lock). */
2397 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2398 } else {
2399 /* Nothing to do here, so just drop the lock. */
2400 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2401 }
2402 }
2403}
2404
2405/*
2406 * Force quiescent states on reluctant CPUs, and also detect which
2407 * CPUs are in dyntick-idle mode.
2408 */
2409void rcu_force_quiescent_state(void)
2410{
2411 unsigned long flags;
2412 bool ret;
2413 struct rcu_node *rnp;
2414 struct rcu_node *rnp_old = NULL;
2415
2416 /* Funnel through hierarchy to reduce memory contention. */
2417 rnp = raw_cpu_read(rcu_data.mynode);
2418 for (; rnp != NULL; rnp = rnp->parent) {
2419 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2420 !raw_spin_trylock(&rnp->fqslock);
2421 if (rnp_old != NULL)
2422 raw_spin_unlock(&rnp_old->fqslock);
2423 if (ret)
2424 return;
2425 rnp_old = rnp;
2426 }
2427 /* rnp_old == rcu_get_root(), rnp == NULL. */
2428
2429 /* Reached the root of the rcu_node tree, acquire lock. */
2430 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2431 raw_spin_unlock(&rnp_old->fqslock);
2432 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2433 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2434 return; /* Someone beat us to it. */
2435 }
2436 WRITE_ONCE(rcu_state.gp_flags,
2437 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2438 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2439 rcu_gp_kthread_wake();
2440}
2441EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2442
2443// Workqueue handler for an RCU reader for kernels enforcing struct RCU
2444// grace periods.
2445static void strict_work_handler(struct work_struct *work)
2446{
2447 rcu_read_lock();
2448 rcu_read_unlock();
2449}
2450
2451/* Perform RCU core processing work for the current CPU. */
2452static __latent_entropy void rcu_core(void)
2453{
2454 unsigned long flags;
2455 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2456 struct rcu_node *rnp = rdp->mynode;
2457 /*
2458 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2459 * Therefore this function can race with concurrent NOCB (de-)offloading
2460 * on this CPU and the below condition must be considered volatile.
2461 * However if we race with:
2462 *
2463 * _ Offloading: In the worst case we accelerate or process callbacks
2464 * concurrently with NOCB kthreads. We are guaranteed to
2465 * call rcu_nocb_lock() if that happens.
2466 *
2467 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2468 * processing. This is fine because the early stage
2469 * of deoffloading invokes rcu_core() after setting
2470 * SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2471 * what could have been dismissed without the need to wait
2472 * for the next rcu_pending() check in the next jiffy.
2473 */
2474 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2475
2476 if (cpu_is_offline(smp_processor_id()))
2477 return;
2478 trace_rcu_utilization(TPS("Start RCU core"));
2479 WARN_ON_ONCE(!rdp->beenonline);
2480
2481 /* Report any deferred quiescent states if preemption enabled. */
2482 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2483 rcu_preempt_deferred_qs(current);
2484 } else if (rcu_preempt_need_deferred_qs(current)) {
2485 set_tsk_need_resched(current);
2486 set_preempt_need_resched();
2487 }
2488
2489 /* Update RCU state based on any recent quiescent states. */
2490 rcu_check_quiescent_state(rdp);
2491
2492 /* No grace period and unregistered callbacks? */
2493 if (!rcu_gp_in_progress() &&
2494 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2495 rcu_nocb_lock_irqsave(rdp, flags);
2496 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2497 rcu_accelerate_cbs_unlocked(rnp, rdp);
2498 rcu_nocb_unlock_irqrestore(rdp, flags);
2499 }
2500
2501 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2502
2503 /* If there are callbacks ready, invoke them. */
2504 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2505 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2506 rcu_do_batch(rdp);
2507 /* Re-invoke RCU core processing if there are callbacks remaining. */
2508 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2509 invoke_rcu_core();
2510 }
2511
2512 /* Do any needed deferred wakeups of rcuo kthreads. */
2513 do_nocb_deferred_wakeup(rdp);
2514 trace_rcu_utilization(TPS("End RCU core"));
2515
2516 // If strict GPs, schedule an RCU reader in a clean environment.
2517 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2518 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2519}
2520
2521static void rcu_core_si(struct softirq_action *h)
2522{
2523 rcu_core();
2524}
2525
2526static void rcu_wake_cond(struct task_struct *t, int status)
2527{
2528 /*
2529 * If the thread is yielding, only wake it when this
2530 * is invoked from idle
2531 */
2532 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2533 wake_up_process(t);
2534}
2535
2536static void invoke_rcu_core_kthread(void)
2537{
2538 struct task_struct *t;
2539 unsigned long flags;
2540
2541 local_irq_save(flags);
2542 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2543 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2544 if (t != NULL && t != current)
2545 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2546 local_irq_restore(flags);
2547}
2548
2549/*
2550 * Wake up this CPU's rcuc kthread to do RCU core processing.
2551 */
2552static void invoke_rcu_core(void)
2553{
2554 if (!cpu_online(smp_processor_id()))
2555 return;
2556 if (use_softirq)
2557 raise_softirq(RCU_SOFTIRQ);
2558 else
2559 invoke_rcu_core_kthread();
2560}
2561
2562static void rcu_cpu_kthread_park(unsigned int cpu)
2563{
2564 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2565}
2566
2567static int rcu_cpu_kthread_should_run(unsigned int cpu)
2568{
2569 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2570}
2571
2572/*
2573 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2574 * the RCU softirq used in configurations of RCU that do not support RCU
2575 * priority boosting.
2576 */
2577static void rcu_cpu_kthread(unsigned int cpu)
2578{
2579 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2580 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2581 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2582 int spincnt;
2583
2584 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2585 for (spincnt = 0; spincnt < 10; spincnt++) {
2586 WRITE_ONCE(*j, jiffies);
2587 local_bh_disable();
2588 *statusp = RCU_KTHREAD_RUNNING;
2589 local_irq_disable();
2590 work = *workp;
2591 *workp = 0;
2592 local_irq_enable();
2593 if (work)
2594 rcu_core();
2595 local_bh_enable();
2596 if (*workp == 0) {
2597 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2598 *statusp = RCU_KTHREAD_WAITING;
2599 return;
2600 }
2601 }
2602 *statusp = RCU_KTHREAD_YIELDING;
2603 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2604 schedule_timeout_idle(2);
2605 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2606 *statusp = RCU_KTHREAD_WAITING;
2607 WRITE_ONCE(*j, jiffies);
2608}
2609
2610static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2611 .store = &rcu_data.rcu_cpu_kthread_task,
2612 .thread_should_run = rcu_cpu_kthread_should_run,
2613 .thread_fn = rcu_cpu_kthread,
2614 .thread_comm = "rcuc/%u",
2615 .setup = rcu_cpu_kthread_setup,
2616 .park = rcu_cpu_kthread_park,
2617};
2618
2619/*
2620 * Spawn per-CPU RCU core processing kthreads.
2621 */
2622static int __init rcu_spawn_core_kthreads(void)
2623{
2624 int cpu;
2625
2626 for_each_possible_cpu(cpu)
2627 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2628 if (use_softirq)
2629 return 0;
2630 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2631 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2632 return 0;
2633}
2634
2635/*
2636 * Handle any core-RCU processing required by a call_rcu() invocation.
2637 */
2638static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2639 unsigned long flags)
2640{
2641 /*
2642 * If called from an extended quiescent state, invoke the RCU
2643 * core in order to force a re-evaluation of RCU's idleness.
2644 */
2645 if (!rcu_is_watching())
2646 invoke_rcu_core();
2647
2648 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2649 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2650 return;
2651
2652 /*
2653 * Force the grace period if too many callbacks or too long waiting.
2654 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2655 * if some other CPU has recently done so. Also, don't bother
2656 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2657 * is the only one waiting for a grace period to complete.
2658 */
2659 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2660 rdp->qlen_last_fqs_check + qhimark)) {
2661
2662 /* Are we ignoring a completed grace period? */
2663 note_gp_changes(rdp);
2664
2665 /* Start a new grace period if one not already started. */
2666 if (!rcu_gp_in_progress()) {
2667 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2668 } else {
2669 /* Give the grace period a kick. */
2670 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2671 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2672 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2673 rcu_force_quiescent_state();
2674 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2675 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2676 }
2677 }
2678}
2679
2680/*
2681 * RCU callback function to leak a callback.
2682 */
2683static void rcu_leak_callback(struct rcu_head *rhp)
2684{
2685}
2686
2687/*
2688 * Check and if necessary update the leaf rcu_node structure's
2689 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2690 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2691 * structure's ->lock.
2692 */
2693static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2694{
2695 raw_lockdep_assert_held_rcu_node(rnp);
2696 if (qovld_calc <= 0)
2697 return; // Early boot and wildcard value set.
2698 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2699 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2700 else
2701 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2702}
2703
2704/*
2705 * Check and if necessary update the leaf rcu_node structure's
2706 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2707 * number of queued RCU callbacks. No locks need be held, but the
2708 * caller must have disabled interrupts.
2709 *
2710 * Note that this function ignores the possibility that there are a lot
2711 * of callbacks all of which have already seen the end of their respective
2712 * grace periods. This omission is due to the need for no-CBs CPUs to
2713 * be holding ->nocb_lock to do this check, which is too heavy for a
2714 * common-case operation.
2715 */
2716static void check_cb_ovld(struct rcu_data *rdp)
2717{
2718 struct rcu_node *const rnp = rdp->mynode;
2719
2720 if (qovld_calc <= 0 ||
2721 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2722 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2723 return; // Early boot wildcard value or already set correctly.
2724 raw_spin_lock_rcu_node(rnp);
2725 check_cb_ovld_locked(rdp, rnp);
2726 raw_spin_unlock_rcu_node(rnp);
2727}
2728
2729static void
2730__call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy)
2731{
2732 static atomic_t doublefrees;
2733 unsigned long flags;
2734 struct rcu_data *rdp;
2735 bool was_alldone;
2736
2737 /* Misaligned rcu_head! */
2738 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2739
2740 if (debug_rcu_head_queue(head)) {
2741 /*
2742 * Probable double call_rcu(), so leak the callback.
2743 * Use rcu:rcu_callback trace event to find the previous
2744 * time callback was passed to call_rcu().
2745 */
2746 if (atomic_inc_return(&doublefrees) < 4) {
2747 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
2748 mem_dump_obj(head);
2749 }
2750 WRITE_ONCE(head->func, rcu_leak_callback);
2751 return;
2752 }
2753 head->func = func;
2754 head->next = NULL;
2755 kasan_record_aux_stack_noalloc(head);
2756 local_irq_save(flags);
2757 rdp = this_cpu_ptr(&rcu_data);
2758
2759 /* Add the callback to our list. */
2760 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2761 // This can trigger due to call_rcu() from offline CPU:
2762 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2763 WARN_ON_ONCE(!rcu_is_watching());
2764 // Very early boot, before rcu_init(). Initialize if needed
2765 // and then drop through to queue the callback.
2766 if (rcu_segcblist_empty(&rdp->cblist))
2767 rcu_segcblist_init(&rdp->cblist);
2768 }
2769
2770 check_cb_ovld(rdp);
2771 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2772 return; // Enqueued onto ->nocb_bypass, so just leave.
2773 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2774 rcu_segcblist_enqueue(&rdp->cblist, head);
2775 if (__is_kvfree_rcu_offset((unsigned long)func))
2776 trace_rcu_kvfree_callback(rcu_state.name, head,
2777 (unsigned long)func,
2778 rcu_segcblist_n_cbs(&rdp->cblist));
2779 else
2780 trace_rcu_callback(rcu_state.name, head,
2781 rcu_segcblist_n_cbs(&rdp->cblist));
2782
2783 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2784
2785 /* Go handle any RCU core processing required. */
2786 if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2787 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2788 } else {
2789 __call_rcu_core(rdp, head, flags);
2790 local_irq_restore(flags);
2791 }
2792}
2793
2794#ifdef CONFIG_RCU_LAZY
2795/**
2796 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2797 * flush all lazy callbacks (including the new one) to the main ->cblist while
2798 * doing so.
2799 *
2800 * @head: structure to be used for queueing the RCU updates.
2801 * @func: actual callback function to be invoked after the grace period
2802 *
2803 * The callback function will be invoked some time after a full grace
2804 * period elapses, in other words after all pre-existing RCU read-side
2805 * critical sections have completed.
2806 *
2807 * Use this API instead of call_rcu() if you don't want the callback to be
2808 * invoked after very long periods of time, which can happen on systems without
2809 * memory pressure and on systems which are lightly loaded or mostly idle.
2810 * This function will cause callbacks to be invoked sooner than later at the
2811 * expense of extra power. Other than that, this function is identical to, and
2812 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2813 * ordering and other functionality.
2814 */
2815void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2816{
2817 return __call_rcu_common(head, func, false);
2818}
2819EXPORT_SYMBOL_GPL(call_rcu_hurry);
2820#endif
2821
2822/**
2823 * call_rcu() - Queue an RCU callback for invocation after a grace period.
2824 * By default the callbacks are 'lazy' and are kept hidden from the main
2825 * ->cblist to prevent starting of grace periods too soon.
2826 * If you desire grace periods to start very soon, use call_rcu_hurry().
2827 *
2828 * @head: structure to be used for queueing the RCU updates.
2829 * @func: actual callback function to be invoked after the grace period
2830 *
2831 * The callback function will be invoked some time after a full grace
2832 * period elapses, in other words after all pre-existing RCU read-side
2833 * critical sections have completed. However, the callback function
2834 * might well execute concurrently with RCU read-side critical sections
2835 * that started after call_rcu() was invoked.
2836 *
2837 * RCU read-side critical sections are delimited by rcu_read_lock()
2838 * and rcu_read_unlock(), and may be nested. In addition, but only in
2839 * v5.0 and later, regions of code across which interrupts, preemption,
2840 * or softirqs have been disabled also serve as RCU read-side critical
2841 * sections. This includes hardware interrupt handlers, softirq handlers,
2842 * and NMI handlers.
2843 *
2844 * Note that all CPUs must agree that the grace period extended beyond
2845 * all pre-existing RCU read-side critical section. On systems with more
2846 * than one CPU, this means that when "func()" is invoked, each CPU is
2847 * guaranteed to have executed a full memory barrier since the end of its
2848 * last RCU read-side critical section whose beginning preceded the call
2849 * to call_rcu(). It also means that each CPU executing an RCU read-side
2850 * critical section that continues beyond the start of "func()" must have
2851 * executed a memory barrier after the call_rcu() but before the beginning
2852 * of that RCU read-side critical section. Note that these guarantees
2853 * include CPUs that are offline, idle, or executing in user mode, as
2854 * well as CPUs that are executing in the kernel.
2855 *
2856 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2857 * resulting RCU callback function "func()", then both CPU A and CPU B are
2858 * guaranteed to execute a full memory barrier during the time interval
2859 * between the call to call_rcu() and the invocation of "func()" -- even
2860 * if CPU A and CPU B are the same CPU (but again only if the system has
2861 * more than one CPU).
2862 *
2863 * Implementation of these memory-ordering guarantees is described here:
2864 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2865 */
2866void call_rcu(struct rcu_head *head, rcu_callback_t func)
2867{
2868 return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2869}
2870EXPORT_SYMBOL_GPL(call_rcu);
2871
2872/* Maximum number of jiffies to wait before draining a batch. */
2873#define KFREE_DRAIN_JIFFIES (5 * HZ)
2874#define KFREE_N_BATCHES 2
2875#define FREE_N_CHANNELS 2
2876
2877/**
2878 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2879 * @nr_records: Number of active pointers in the array
2880 * @next: Next bulk object in the block chain
2881 * @records: Array of the kvfree_rcu() pointers
2882 */
2883struct kvfree_rcu_bulk_data {
2884 unsigned long nr_records;
2885 struct kvfree_rcu_bulk_data *next;
2886 void *records[];
2887};
2888
2889/*
2890 * This macro defines how many entries the "records" array
2891 * will contain. It is based on the fact that the size of
2892 * kvfree_rcu_bulk_data structure becomes exactly one page.
2893 */
2894#define KVFREE_BULK_MAX_ENTR \
2895 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
2896
2897/**
2898 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2899 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2900 * @head_free: List of kfree_rcu() objects waiting for a grace period
2901 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2902 * @krcp: Pointer to @kfree_rcu_cpu structure
2903 */
2904
2905struct kfree_rcu_cpu_work {
2906 struct rcu_work rcu_work;
2907 struct rcu_head *head_free;
2908 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
2909 struct kfree_rcu_cpu *krcp;
2910};
2911
2912/**
2913 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2914 * @head: List of kfree_rcu() objects not yet waiting for a grace period
2915 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2916 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2917 * @lock: Synchronize access to this structure
2918 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2919 * @initialized: The @rcu_work fields have been initialized
2920 * @count: Number of objects for which GP not started
2921 * @bkvcache:
2922 * A simple cache list that contains objects for reuse purpose.
2923 * In order to save some per-cpu space the list is singular.
2924 * Even though it is lockless an access has to be protected by the
2925 * per-cpu lock.
2926 * @page_cache_work: A work to refill the cache when it is empty
2927 * @backoff_page_cache_fill: Delay cache refills
2928 * @work_in_progress: Indicates that page_cache_work is running
2929 * @hrtimer: A hrtimer for scheduling a page_cache_work
2930 * @nr_bkv_objs: number of allocated objects at @bkvcache.
2931 *
2932 * This is a per-CPU structure. The reason that it is not included in
2933 * the rcu_data structure is to permit this code to be extracted from
2934 * the RCU files. Such extraction could allow further optimization of
2935 * the interactions with the slab allocators.
2936 */
2937struct kfree_rcu_cpu {
2938 struct rcu_head *head;
2939 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
2940 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2941 raw_spinlock_t lock;
2942 struct delayed_work monitor_work;
2943 bool initialized;
2944 int count;
2945
2946 struct delayed_work page_cache_work;
2947 atomic_t backoff_page_cache_fill;
2948 atomic_t work_in_progress;
2949 struct hrtimer hrtimer;
2950
2951 struct llist_head bkvcache;
2952 int nr_bkv_objs;
2953};
2954
2955static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2956 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2957};
2958
2959static __always_inline void
2960debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2961{
2962#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2963 int i;
2964
2965 for (i = 0; i < bhead->nr_records; i++)
2966 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2967#endif
2968}
2969
2970static inline struct kfree_rcu_cpu *
2971krc_this_cpu_lock(unsigned long *flags)
2972{
2973 struct kfree_rcu_cpu *krcp;
2974
2975 local_irq_save(*flags); // For safely calling this_cpu_ptr().
2976 krcp = this_cpu_ptr(&krc);
2977 raw_spin_lock(&krcp->lock);
2978
2979 return krcp;
2980}
2981
2982static inline void
2983krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2984{
2985 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2986}
2987
2988static inline struct kvfree_rcu_bulk_data *
2989get_cached_bnode(struct kfree_rcu_cpu *krcp)
2990{
2991 if (!krcp->nr_bkv_objs)
2992 return NULL;
2993
2994 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2995 return (struct kvfree_rcu_bulk_data *)
2996 llist_del_first(&krcp->bkvcache);
2997}
2998
2999static inline bool
3000put_cached_bnode(struct kfree_rcu_cpu *krcp,
3001 struct kvfree_rcu_bulk_data *bnode)
3002{
3003 // Check the limit.
3004 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3005 return false;
3006
3007 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3008 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3009 return true;
3010}
3011
3012static int
3013drain_page_cache(struct kfree_rcu_cpu *krcp)
3014{
3015 unsigned long flags;
3016 struct llist_node *page_list, *pos, *n;
3017 int freed = 0;
3018
3019 raw_spin_lock_irqsave(&krcp->lock, flags);
3020 page_list = llist_del_all(&krcp->bkvcache);
3021 WRITE_ONCE(krcp->nr_bkv_objs, 0);
3022 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3023
3024 llist_for_each_safe(pos, n, page_list) {
3025 free_page((unsigned long)pos);
3026 freed++;
3027 }
3028
3029 return freed;
3030}
3031
3032/*
3033 * This function is invoked in workqueue context after a grace period.
3034 * It frees all the objects queued on ->bkvhead_free or ->head_free.
3035 */
3036static void kfree_rcu_work(struct work_struct *work)
3037{
3038 unsigned long flags;
3039 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3040 struct rcu_head *head, *next;
3041 struct kfree_rcu_cpu *krcp;
3042 struct kfree_rcu_cpu_work *krwp;
3043 int i, j;
3044
3045 krwp = container_of(to_rcu_work(work),
3046 struct kfree_rcu_cpu_work, rcu_work);
3047 krcp = krwp->krcp;
3048
3049 raw_spin_lock_irqsave(&krcp->lock, flags);
3050 // Channels 1 and 2.
3051 for (i = 0; i < FREE_N_CHANNELS; i++) {
3052 bkvhead[i] = krwp->bkvhead_free[i];
3053 krwp->bkvhead_free[i] = NULL;
3054 }
3055
3056 // Channel 3.
3057 head = krwp->head_free;
3058 krwp->head_free = NULL;
3059 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3060
3061 // Handle the first two channels.
3062 for (i = 0; i < FREE_N_CHANNELS; i++) {
3063 for (; bkvhead[i]; bkvhead[i] = bnext) {
3064 bnext = bkvhead[i]->next;
3065 debug_rcu_bhead_unqueue(bkvhead[i]);
3066
3067 rcu_lock_acquire(&rcu_callback_map);
3068 if (i == 0) { // kmalloc() / kfree().
3069 trace_rcu_invoke_kfree_bulk_callback(
3070 rcu_state.name, bkvhead[i]->nr_records,
3071 bkvhead[i]->records);
3072
3073 kfree_bulk(bkvhead[i]->nr_records,
3074 bkvhead[i]->records);
3075 } else { // vmalloc() / vfree().
3076 for (j = 0; j < bkvhead[i]->nr_records; j++) {
3077 trace_rcu_invoke_kvfree_callback(
3078 rcu_state.name,
3079 bkvhead[i]->records[j], 0);
3080
3081 vfree(bkvhead[i]->records[j]);
3082 }
3083 }
3084 rcu_lock_release(&rcu_callback_map);
3085
3086 raw_spin_lock_irqsave(&krcp->lock, flags);
3087 if (put_cached_bnode(krcp, bkvhead[i]))
3088 bkvhead[i] = NULL;
3089 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3090
3091 if (bkvhead[i])
3092 free_page((unsigned long) bkvhead[i]);
3093
3094 cond_resched_tasks_rcu_qs();
3095 }
3096 }
3097
3098 /*
3099 * This is used when the "bulk" path can not be used for the
3100 * double-argument of kvfree_rcu(). This happens when the
3101 * page-cache is empty, which means that objects are instead
3102 * queued on a linked list through their rcu_head structures.
3103 * This list is named "Channel 3".
3104 */
3105 for (; head; head = next) {
3106 unsigned long offset = (unsigned long)head->func;
3107 void *ptr = (void *)head - offset;
3108
3109 next = head->next;
3110 debug_rcu_head_unqueue((struct rcu_head *)ptr);
3111 rcu_lock_acquire(&rcu_callback_map);
3112 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3113
3114 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3115 kvfree(ptr);
3116
3117 rcu_lock_release(&rcu_callback_map);
3118 cond_resched_tasks_rcu_qs();
3119 }
3120}
3121
3122static bool
3123need_offload_krc(struct kfree_rcu_cpu *krcp)
3124{
3125 int i;
3126
3127 for (i = 0; i < FREE_N_CHANNELS; i++)
3128 if (krcp->bkvhead[i])
3129 return true;
3130
3131 return !!krcp->head;
3132}
3133
3134static void
3135schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3136{
3137 long delay, delay_left;
3138
3139 delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3140 if (delayed_work_pending(&krcp->monitor_work)) {
3141 delay_left = krcp->monitor_work.timer.expires - jiffies;
3142 if (delay < delay_left)
3143 mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3144 return;
3145 }
3146 queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3147}
3148
3149/*
3150 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3151 */
3152static void kfree_rcu_monitor(struct work_struct *work)
3153{
3154 struct kfree_rcu_cpu *krcp = container_of(work,
3155 struct kfree_rcu_cpu, monitor_work.work);
3156 unsigned long flags;
3157 int i, j;
3158
3159 raw_spin_lock_irqsave(&krcp->lock, flags);
3160
3161 // Attempt to start a new batch.
3162 for (i = 0; i < KFREE_N_BATCHES; i++) {
3163 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3164
3165 // Try to detach bkvhead or head and attach it over any
3166 // available corresponding free channel. It can be that
3167 // a previous RCU batch is in progress, it means that
3168 // immediately to queue another one is not possible so
3169 // in that case the monitor work is rearmed.
3170 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3171 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3172 (krcp->head && !krwp->head_free)) {
3173 // Channel 1 corresponds to the SLAB-pointer bulk path.
3174 // Channel 2 corresponds to vmalloc-pointer bulk path.
3175 for (j = 0; j < FREE_N_CHANNELS; j++) {
3176 if (!krwp->bkvhead_free[j]) {
3177 krwp->bkvhead_free[j] = krcp->bkvhead[j];
3178 krcp->bkvhead[j] = NULL;
3179 }
3180 }
3181
3182 // Channel 3 corresponds to both SLAB and vmalloc
3183 // objects queued on the linked list.
3184 if (!krwp->head_free) {
3185 krwp->head_free = krcp->head;
3186 krcp->head = NULL;
3187 }
3188
3189 WRITE_ONCE(krcp->count, 0);
3190
3191 // One work is per one batch, so there are three
3192 // "free channels", the batch can handle. It can
3193 // be that the work is in the pending state when
3194 // channels have been detached following by each
3195 // other.
3196 queue_rcu_work(system_wq, &krwp->rcu_work);
3197 }
3198 }
3199
3200 // If there is nothing to detach, it means that our job is
3201 // successfully done here. In case of having at least one
3202 // of the channels that is still busy we should rearm the
3203 // work to repeat an attempt. Because previous batches are
3204 // still in progress.
3205 if (need_offload_krc(krcp))
3206 schedule_delayed_monitor_work(krcp);
3207
3208 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3209}
3210
3211static enum hrtimer_restart
3212schedule_page_work_fn(struct hrtimer *t)
3213{
3214 struct kfree_rcu_cpu *krcp =
3215 container_of(t, struct kfree_rcu_cpu, hrtimer);
3216
3217 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3218 return HRTIMER_NORESTART;
3219}
3220
3221static void fill_page_cache_func(struct work_struct *work)
3222{
3223 struct kvfree_rcu_bulk_data *bnode;
3224 struct kfree_rcu_cpu *krcp =
3225 container_of(work, struct kfree_rcu_cpu,
3226 page_cache_work.work);
3227 unsigned long flags;
3228 int nr_pages;
3229 bool pushed;
3230 int i;
3231
3232 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3233 1 : rcu_min_cached_objs;
3234
3235 for (i = 0; i < nr_pages; i++) {
3236 bnode = (struct kvfree_rcu_bulk_data *)
3237 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3238
3239 if (!bnode)
3240 break;
3241
3242 raw_spin_lock_irqsave(&krcp->lock, flags);
3243 pushed = put_cached_bnode(krcp, bnode);
3244 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3245
3246 if (!pushed) {
3247 free_page((unsigned long) bnode);
3248 break;
3249 }
3250 }
3251
3252 atomic_set(&krcp->work_in_progress, 0);
3253 atomic_set(&krcp->backoff_page_cache_fill, 0);
3254}
3255
3256static void
3257run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3258{
3259 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3260 !atomic_xchg(&krcp->work_in_progress, 1)) {
3261 if (atomic_read(&krcp->backoff_page_cache_fill)) {
3262 queue_delayed_work(system_wq,
3263 &krcp->page_cache_work,
3264 msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3265 } else {
3266 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3267 krcp->hrtimer.function = schedule_page_work_fn;
3268 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3269 }
3270 }
3271}
3272
3273// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3274// state specified by flags. If can_alloc is true, the caller must
3275// be schedulable and not be holding any locks or mutexes that might be
3276// acquired by the memory allocator or anything that it might invoke.
3277// Returns true if ptr was successfully recorded, else the caller must
3278// use a fallback.
3279static inline bool
3280add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3281 unsigned long *flags, void *ptr, bool can_alloc)
3282{
3283 struct kvfree_rcu_bulk_data *bnode;
3284 int idx;
3285
3286 *krcp = krc_this_cpu_lock(flags);
3287 if (unlikely(!(*krcp)->initialized))
3288 return false;
3289
3290 idx = !!is_vmalloc_addr(ptr);
3291
3292 /* Check if a new block is required. */
3293 if (!(*krcp)->bkvhead[idx] ||
3294 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3295 bnode = get_cached_bnode(*krcp);
3296 if (!bnode && can_alloc) {
3297 krc_this_cpu_unlock(*krcp, *flags);
3298
3299 // __GFP_NORETRY - allows a light-weight direct reclaim
3300 // what is OK from minimizing of fallback hitting point of
3301 // view. Apart of that it forbids any OOM invoking what is
3302 // also beneficial since we are about to release memory soon.
3303 //
3304 // __GFP_NOMEMALLOC - prevents from consuming of all the
3305 // memory reserves. Please note we have a fallback path.
3306 //
3307 // __GFP_NOWARN - it is supposed that an allocation can
3308 // be failed under low memory or high memory pressure
3309 // scenarios.
3310 bnode = (struct kvfree_rcu_bulk_data *)
3311 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3312 *krcp = krc_this_cpu_lock(flags);
3313 }
3314
3315 if (!bnode)
3316 return false;
3317
3318 /* Initialize the new block. */
3319 bnode->nr_records = 0;
3320 bnode->next = (*krcp)->bkvhead[idx];
3321
3322 /* Attach it to the head. */
3323 (*krcp)->bkvhead[idx] = bnode;
3324 }
3325
3326 /* Finally insert. */
3327 (*krcp)->bkvhead[idx]->records
3328 [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3329
3330 return true;
3331}
3332
3333/*
3334 * Queue a request for lazy invocation of the appropriate free routine
3335 * after a grace period. Please note that three paths are maintained,
3336 * two for the common case using arrays of pointers and a third one that
3337 * is used only when the main paths cannot be used, for example, due to
3338 * memory pressure.
3339 *
3340 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3341 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3342 * be free'd in workqueue context. This allows us to: batch requests together to
3343 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3344 */
3345void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3346{
3347 unsigned long flags;
3348 struct kfree_rcu_cpu *krcp;
3349 bool success;
3350 void *ptr;
3351
3352 if (head) {
3353 ptr = (void *) head - (unsigned long) func;
3354 } else {
3355 /*
3356 * Please note there is a limitation for the head-less
3357 * variant, that is why there is a clear rule for such
3358 * objects: it can be used from might_sleep() context
3359 * only. For other places please embed an rcu_head to
3360 * your data.
3361 */
3362 might_sleep();
3363 ptr = (unsigned long *) func;
3364 }
3365
3366 // Queue the object but don't yet schedule the batch.
3367 if (debug_rcu_head_queue(ptr)) {
3368 // Probable double kfree_rcu(), just leak.
3369 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3370 __func__, head);
3371
3372 // Mark as success and leave.
3373 return;
3374 }
3375
3376 kasan_record_aux_stack_noalloc(ptr);
3377 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3378 if (!success) {
3379 run_page_cache_worker(krcp);
3380
3381 if (head == NULL)
3382 // Inline if kvfree_rcu(one_arg) call.
3383 goto unlock_return;
3384
3385 head->func = func;
3386 head->next = krcp->head;
3387 krcp->head = head;
3388 success = true;
3389 }
3390
3391 WRITE_ONCE(krcp->count, krcp->count + 1);
3392
3393 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3394 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3395 schedule_delayed_monitor_work(krcp);
3396
3397unlock_return:
3398 krc_this_cpu_unlock(krcp, flags);
3399
3400 /*
3401 * Inline kvfree() after synchronize_rcu(). We can do
3402 * it from might_sleep() context only, so the current
3403 * CPU can pass the QS state.
3404 */
3405 if (!success) {
3406 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3407 synchronize_rcu();
3408 kvfree(ptr);
3409 }
3410}
3411EXPORT_SYMBOL_GPL(kvfree_call_rcu);
3412
3413static unsigned long
3414kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3415{
3416 int cpu;
3417 unsigned long count = 0;
3418
3419 /* Snapshot count of all CPUs */
3420 for_each_possible_cpu(cpu) {
3421 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3422
3423 count += READ_ONCE(krcp->count);
3424 count += READ_ONCE(krcp->nr_bkv_objs);
3425 atomic_set(&krcp->backoff_page_cache_fill, 1);
3426 }
3427
3428 return count == 0 ? SHRINK_EMPTY : count;
3429}
3430
3431static unsigned long
3432kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3433{
3434 int cpu, freed = 0;
3435
3436 for_each_possible_cpu(cpu) {
3437 int count;
3438 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3439
3440 count = krcp->count;
3441 count += drain_page_cache(krcp);
3442 kfree_rcu_monitor(&krcp->monitor_work.work);
3443
3444 sc->nr_to_scan -= count;
3445 freed += count;
3446
3447 if (sc->nr_to_scan <= 0)
3448 break;
3449 }
3450
3451 return freed == 0 ? SHRINK_STOP : freed;
3452}
3453
3454static struct shrinker kfree_rcu_shrinker = {
3455 .count_objects = kfree_rcu_shrink_count,
3456 .scan_objects = kfree_rcu_shrink_scan,
3457 .batch = 0,
3458 .seeks = DEFAULT_SEEKS,
3459};
3460
3461void __init kfree_rcu_scheduler_running(void)
3462{
3463 int cpu;
3464 unsigned long flags;
3465
3466 for_each_possible_cpu(cpu) {
3467 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3468
3469 raw_spin_lock_irqsave(&krcp->lock, flags);
3470 if (need_offload_krc(krcp))
3471 schedule_delayed_monitor_work(krcp);
3472 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3473 }
3474}
3475
3476/*
3477 * During early boot, any blocking grace-period wait automatically
3478 * implies a grace period.
3479 *
3480 * Later on, this could in theory be the case for kernels built with
3481 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3482 * is not a common case. Furthermore, this optimization would cause
3483 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3484 * grace-period optimization is ignored once the scheduler is running.
3485 */
3486static int rcu_blocking_is_gp(void)
3487{
3488 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
3489 return false;
3490 might_sleep(); /* Check for RCU read-side critical section. */
3491 return true;
3492}
3493
3494/**
3495 * synchronize_rcu - wait until a grace period has elapsed.
3496 *
3497 * Control will return to the caller some time after a full grace
3498 * period has elapsed, in other words after all currently executing RCU
3499 * read-side critical sections have completed. Note, however, that
3500 * upon return from synchronize_rcu(), the caller might well be executing
3501 * concurrently with new RCU read-side critical sections that began while
3502 * synchronize_rcu() was waiting.
3503 *
3504 * RCU read-side critical sections are delimited by rcu_read_lock()
3505 * and rcu_read_unlock(), and may be nested. In addition, but only in
3506 * v5.0 and later, regions of code across which interrupts, preemption,
3507 * or softirqs have been disabled also serve as RCU read-side critical
3508 * sections. This includes hardware interrupt handlers, softirq handlers,
3509 * and NMI handlers.
3510 *
3511 * Note that this guarantee implies further memory-ordering guarantees.
3512 * On systems with more than one CPU, when synchronize_rcu() returns,
3513 * each CPU is guaranteed to have executed a full memory barrier since
3514 * the end of its last RCU read-side critical section whose beginning
3515 * preceded the call to synchronize_rcu(). In addition, each CPU having
3516 * an RCU read-side critical section that extends beyond the return from
3517 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3518 * after the beginning of synchronize_rcu() and before the beginning of
3519 * that RCU read-side critical section. Note that these guarantees include
3520 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3521 * that are executing in the kernel.
3522 *
3523 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3524 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3525 * to have executed a full memory barrier during the execution of
3526 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3527 * again only if the system has more than one CPU).
3528 *
3529 * Implementation of these memory-ordering guarantees is described here:
3530 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3531 */
3532void synchronize_rcu(void)
3533{
3534 unsigned long flags;
3535 struct rcu_node *rnp;
3536
3537 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3538 lock_is_held(&rcu_lock_map) ||
3539 lock_is_held(&rcu_sched_lock_map),
3540 "Illegal synchronize_rcu() in RCU read-side critical section");
3541 if (!rcu_blocking_is_gp()) {
3542 if (rcu_gp_is_expedited())
3543 synchronize_rcu_expedited();
3544 else
3545 wait_rcu_gp(call_rcu_hurry);
3546 return;
3547 }
3548
3549 // Context allows vacuous grace periods.
3550 // Note well that this code runs with !PREEMPT && !SMP.
3551 // In addition, all code that advances grace periods runs at
3552 // process level. Therefore, this normal GP overlaps with other
3553 // normal GPs only by being fully nested within them, which allows
3554 // reuse of ->gp_seq_polled_snap.
3555 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3556 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3557
3558 // Update the normal grace-period counters to record
3559 // this grace period, but only those used by the boot CPU.
3560 // The rcu_scheduler_starting() will take care of the rest of
3561 // these counters.
3562 local_irq_save(flags);
3563 WARN_ON_ONCE(num_online_cpus() > 1);
3564 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3565 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3566 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3567 local_irq_restore(flags);
3568}
3569EXPORT_SYMBOL_GPL(synchronize_rcu);
3570
3571/**
3572 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3573 * @rgosp: Place to put state cookie
3574 *
3575 * Stores into @rgosp a value that will always be treated by functions
3576 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3577 * has already completed.
3578 */
3579void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3580{
3581 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3582 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3583}
3584EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3585
3586/**
3587 * get_state_synchronize_rcu - Snapshot current RCU state
3588 *
3589 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3590 * or poll_state_synchronize_rcu() to determine whether or not a full
3591 * grace period has elapsed in the meantime.
3592 */
3593unsigned long get_state_synchronize_rcu(void)
3594{
3595 /*
3596 * Any prior manipulation of RCU-protected data must happen
3597 * before the load from ->gp_seq.
3598 */
3599 smp_mb(); /* ^^^ */
3600 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3601}
3602EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3603
3604/**
3605 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3606 * @rgosp: location to place combined normal/expedited grace-period state
3607 *
3608 * Places the normal and expedited grace-period states in @rgosp. This
3609 * state value can be passed to a later call to cond_synchronize_rcu_full()
3610 * or poll_state_synchronize_rcu_full() to determine whether or not a
3611 * grace period (whether normal or expedited) has elapsed in the meantime.
3612 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3613 * long, but is guaranteed to see all grace periods. In contrast, the
3614 * combined state occupies less memory, but can sometimes fail to take
3615 * grace periods into account.
3616 *
3617 * This does not guarantee that the needed grace period will actually
3618 * start.
3619 */
3620void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3621{
3622 struct rcu_node *rnp = rcu_get_root();
3623
3624 /*
3625 * Any prior manipulation of RCU-protected data must happen
3626 * before the loads from ->gp_seq and ->expedited_sequence.
3627 */
3628 smp_mb(); /* ^^^ */
3629 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3630 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3631}
3632EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3633
3634/*
3635 * Helper function for start_poll_synchronize_rcu() and
3636 * start_poll_synchronize_rcu_full().
3637 */
3638static void start_poll_synchronize_rcu_common(void)
3639{
3640 unsigned long flags;
3641 bool needwake;
3642 struct rcu_data *rdp;
3643 struct rcu_node *rnp;
3644
3645 lockdep_assert_irqs_enabled();
3646 local_irq_save(flags);
3647 rdp = this_cpu_ptr(&rcu_data);
3648 rnp = rdp->mynode;
3649 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3650 // Note it is possible for a grace period to have elapsed between
3651 // the above call to get_state_synchronize_rcu() and the below call
3652 // to rcu_seq_snap. This is OK, the worst that happens is that we
3653 // get a grace period that no one needed. These accesses are ordered
3654 // by smp_mb(), and we are accessing them in the opposite order
3655 // from which they are updated at grace-period start, as required.
3656 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3657 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3658 if (needwake)
3659 rcu_gp_kthread_wake();
3660}
3661
3662/**
3663 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3664 *
3665 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3666 * or poll_state_synchronize_rcu() to determine whether or not a full
3667 * grace period has elapsed in the meantime. If the needed grace period
3668 * is not already slated to start, notifies RCU core of the need for that
3669 * grace period.
3670 *
3671 * Interrupts must be enabled for the case where it is necessary to awaken
3672 * the grace-period kthread.
3673 */
3674unsigned long start_poll_synchronize_rcu(void)
3675{
3676 unsigned long gp_seq = get_state_synchronize_rcu();
3677
3678 start_poll_synchronize_rcu_common();
3679 return gp_seq;
3680}
3681EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3682
3683/**
3684 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3685 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3686 *
3687 * Places the normal and expedited grace-period states in *@rgos. This
3688 * state value can be passed to a later call to cond_synchronize_rcu_full()
3689 * or poll_state_synchronize_rcu_full() to determine whether or not a
3690 * grace period (whether normal or expedited) has elapsed in the meantime.
3691 * If the needed grace period is not already slated to start, notifies
3692 * RCU core of the need for that grace period.
3693 *
3694 * Interrupts must be enabled for the case where it is necessary to awaken
3695 * the grace-period kthread.
3696 */
3697void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3698{
3699 get_state_synchronize_rcu_full(rgosp);
3700
3701 start_poll_synchronize_rcu_common();
3702}
3703EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3704
3705/**
3706 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3707 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3708 *
3709 * If a full RCU grace period has elapsed since the earlier call from
3710 * which @oldstate was obtained, return @true, otherwise return @false.
3711 * If @false is returned, it is the caller's responsibility to invoke this
3712 * function later on until it does return @true. Alternatively, the caller
3713 * can explicitly wait for a grace period, for example, by passing @oldstate
3714 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3715 *
3716 * Yes, this function does not take counter wrap into account.
3717 * But counter wrap is harmless. If the counter wraps, we have waited for
3718 * more than a billion grace periods (and way more on a 64-bit system!).
3719 * Those needing to keep old state values for very long time periods
3720 * (many hours even on 32-bit systems) should check them occasionally and
3721 * either refresh them or set a flag indicating that the grace period has
3722 * completed. Alternatively, they can use get_completed_synchronize_rcu()
3723 * to get a guaranteed-completed grace-period state.
3724 *
3725 * This function provides the same memory-ordering guarantees that
3726 * would be provided by a synchronize_rcu() that was invoked at the call
3727 * to the function that provided @oldstate, and that returned at the end
3728 * of this function.
3729 */
3730bool poll_state_synchronize_rcu(unsigned long oldstate)
3731{
3732 if (oldstate == RCU_GET_STATE_COMPLETED ||
3733 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3734 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3735 return true;
3736 }
3737 return false;
3738}
3739EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3740
3741/**
3742 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3743 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3744 *
3745 * If a full RCU grace period has elapsed since the earlier call from
3746 * which *rgosp was obtained, return @true, otherwise return @false.
3747 * If @false is returned, it is the caller's responsibility to invoke this
3748 * function later on until it does return @true. Alternatively, the caller
3749 * can explicitly wait for a grace period, for example, by passing @rgosp
3750 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3751 *
3752 * Yes, this function does not take counter wrap into account.
3753 * But counter wrap is harmless. If the counter wraps, we have waited
3754 * for more than a billion grace periods (and way more on a 64-bit
3755 * system!). Those needing to keep rcu_gp_oldstate values for very
3756 * long time periods (many hours even on 32-bit systems) should check
3757 * them occasionally and either refresh them or set a flag indicating
3758 * that the grace period has completed. Alternatively, they can use
3759 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3760 * grace-period state.
3761 *
3762 * This function provides the same memory-ordering guarantees that would
3763 * be provided by a synchronize_rcu() that was invoked at the call to
3764 * the function that provided @rgosp, and that returned at the end of this
3765 * function. And this guarantee requires that the root rcu_node structure's
3766 * ->gp_seq field be checked instead of that of the rcu_state structure.
3767 * The problem is that the just-ending grace-period's callbacks can be
3768 * invoked between the time that the root rcu_node structure's ->gp_seq
3769 * field is updated and the time that the rcu_state structure's ->gp_seq
3770 * field is updated. Therefore, if a single synchronize_rcu() is to
3771 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3772 * then the root rcu_node structure is the one that needs to be polled.
3773 */
3774bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3775{
3776 struct rcu_node *rnp = rcu_get_root();
3777
3778 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3779 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3780 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3781 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3782 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3783 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3784 return true;
3785 }
3786 return false;
3787}
3788EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3789
3790/**
3791 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3792 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3793 *
3794 * If a full RCU grace period has elapsed since the earlier call to
3795 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3796 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3797 *
3798 * Yes, this function does not take counter wrap into account.
3799 * But counter wrap is harmless. If the counter wraps, we have waited for
3800 * more than 2 billion grace periods (and way more on a 64-bit system!),
3801 * so waiting for a couple of additional grace periods should be just fine.
3802 *
3803 * This function provides the same memory-ordering guarantees that
3804 * would be provided by a synchronize_rcu() that was invoked at the call
3805 * to the function that provided @oldstate and that returned at the end
3806 * of this function.
3807 */
3808void cond_synchronize_rcu(unsigned long oldstate)
3809{
3810 if (!poll_state_synchronize_rcu(oldstate))
3811 synchronize_rcu();
3812}
3813EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3814
3815/**
3816 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3817 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3818 *
3819 * If a full RCU grace period has elapsed since the call to
3820 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3821 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3822 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3823 * for a full grace period.
3824 *
3825 * Yes, this function does not take counter wrap into account.
3826 * But counter wrap is harmless. If the counter wraps, we have waited for
3827 * more than 2 billion grace periods (and way more on a 64-bit system!),
3828 * so waiting for a couple of additional grace periods should be just fine.
3829 *
3830 * This function provides the same memory-ordering guarantees that
3831 * would be provided by a synchronize_rcu() that was invoked at the call
3832 * to the function that provided @rgosp and that returned at the end of
3833 * this function.
3834 */
3835void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3836{
3837 if (!poll_state_synchronize_rcu_full(rgosp))
3838 synchronize_rcu();
3839}
3840EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3841
3842/*
3843 * Check to see if there is any immediate RCU-related work to be done by
3844 * the current CPU, returning 1 if so and zero otherwise. The checks are
3845 * in order of increasing expense: checks that can be carried out against
3846 * CPU-local state are performed first. However, we must check for CPU
3847 * stalls first, else we might not get a chance.
3848 */
3849static int rcu_pending(int user)
3850{
3851 bool gp_in_progress;
3852 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3853 struct rcu_node *rnp = rdp->mynode;
3854
3855 lockdep_assert_irqs_disabled();
3856
3857 /* Check for CPU stalls, if enabled. */
3858 check_cpu_stall(rdp);
3859
3860 /* Does this CPU need a deferred NOCB wakeup? */
3861 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3862 return 1;
3863
3864 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3865 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3866 return 0;
3867
3868 /* Is the RCU core waiting for a quiescent state from this CPU? */
3869 gp_in_progress = rcu_gp_in_progress();
3870 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3871 return 1;
3872
3873 /* Does this CPU have callbacks ready to invoke? */
3874 if (!rcu_rdp_is_offloaded(rdp) &&
3875 rcu_segcblist_ready_cbs(&rdp->cblist))
3876 return 1;
3877
3878 /* Has RCU gone idle with this CPU needing another grace period? */
3879 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3880 !rcu_rdp_is_offloaded(rdp) &&
3881 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3882 return 1;
3883
3884 /* Have RCU grace period completed or started? */
3885 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3886 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3887 return 1;
3888
3889 /* nothing to do */
3890 return 0;
3891}
3892
3893/*
3894 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3895 * the compiler is expected to optimize this away.
3896 */
3897static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3898{
3899 trace_rcu_barrier(rcu_state.name, s, cpu,
3900 atomic_read(&rcu_state.barrier_cpu_count), done);
3901}
3902
3903/*
3904 * RCU callback function for rcu_barrier(). If we are last, wake
3905 * up the task executing rcu_barrier().
3906 *
3907 * Note that the value of rcu_state.barrier_sequence must be captured
3908 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3909 * other CPUs might count the value down to zero before this CPU gets
3910 * around to invoking rcu_barrier_trace(), which might result in bogus
3911 * data from the next instance of rcu_barrier().
3912 */
3913static void rcu_barrier_callback(struct rcu_head *rhp)
3914{
3915 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3916
3917 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3918 rcu_barrier_trace(TPS("LastCB"), -1, s);
3919 complete(&rcu_state.barrier_completion);
3920 } else {
3921 rcu_barrier_trace(TPS("CB"), -1, s);
3922 }
3923}
3924
3925/*
3926 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3927 */
3928static void rcu_barrier_entrain(struct rcu_data *rdp)
3929{
3930 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3931 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3932 bool wake_nocb = false;
3933 bool was_alldone = false;
3934
3935 lockdep_assert_held(&rcu_state.barrier_lock);
3936 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3937 return;
3938 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3939 rdp->barrier_head.func = rcu_barrier_callback;
3940 debug_rcu_head_queue(&rdp->barrier_head);
3941 rcu_nocb_lock(rdp);
3942 /*
3943 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
3944 * queue. This way we don't wait for bypass timer that can reach seconds
3945 * if it's fully lazy.
3946 */
3947 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3948 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3949 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3950 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3951 atomic_inc(&rcu_state.barrier_cpu_count);
3952 } else {
3953 debug_rcu_head_unqueue(&rdp->barrier_head);
3954 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3955 }
3956 rcu_nocb_unlock(rdp);
3957 if (wake_nocb)
3958 wake_nocb_gp(rdp, false);
3959 smp_store_release(&rdp->barrier_seq_snap, gseq);
3960}
3961
3962/*
3963 * Called with preemption disabled, and from cross-cpu IRQ context.
3964 */
3965static void rcu_barrier_handler(void *cpu_in)
3966{
3967 uintptr_t cpu = (uintptr_t)cpu_in;
3968 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3969
3970 lockdep_assert_irqs_disabled();
3971 WARN_ON_ONCE(cpu != rdp->cpu);
3972 WARN_ON_ONCE(cpu != smp_processor_id());
3973 raw_spin_lock(&rcu_state.barrier_lock);
3974 rcu_barrier_entrain(rdp);
3975 raw_spin_unlock(&rcu_state.barrier_lock);
3976}
3977
3978/**
3979 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3980 *
3981 * Note that this primitive does not necessarily wait for an RCU grace period
3982 * to complete. For example, if there are no RCU callbacks queued anywhere
3983 * in the system, then rcu_barrier() is within its rights to return
3984 * immediately, without waiting for anything, much less an RCU grace period.
3985 */
3986void rcu_barrier(void)
3987{
3988 uintptr_t cpu;
3989 unsigned long flags;
3990 unsigned long gseq;
3991 struct rcu_data *rdp;
3992 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3993
3994 rcu_barrier_trace(TPS("Begin"), -1, s);
3995
3996 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3997 mutex_lock(&rcu_state.barrier_mutex);
3998
3999 /* Did someone else do our work for us? */
4000 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4001 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4002 smp_mb(); /* caller's subsequent code after above check. */
4003 mutex_unlock(&rcu_state.barrier_mutex);
4004 return;
4005 }
4006
4007 /* Mark the start of the barrier operation. */
4008 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4009 rcu_seq_start(&rcu_state.barrier_sequence);
4010 gseq = rcu_state.barrier_sequence;
4011 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4012
4013 /*
4014 * Initialize the count to two rather than to zero in order
4015 * to avoid a too-soon return to zero in case of an immediate
4016 * invocation of the just-enqueued callback (or preemption of
4017 * this task). Exclude CPU-hotplug operations to ensure that no
4018 * offline non-offloaded CPU has callbacks queued.
4019 */
4020 init_completion(&rcu_state.barrier_completion);
4021 atomic_set(&rcu_state.barrier_cpu_count, 2);
4022 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4023
4024 /*
4025 * Force each CPU with callbacks to register a new callback.
4026 * When that callback is invoked, we will know that all of the
4027 * corresponding CPU's preceding callbacks have been invoked.
4028 */
4029 for_each_possible_cpu(cpu) {
4030 rdp = per_cpu_ptr(&rcu_data, cpu);
4031retry:
4032 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4033 continue;
4034 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4035 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4036 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4037 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4038 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4039 continue;
4040 }
4041 if (!rcu_rdp_cpu_online(rdp)) {
4042 rcu_barrier_entrain(rdp);
4043 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4044 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4045 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4046 continue;
4047 }
4048 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4049 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4050 schedule_timeout_uninterruptible(1);
4051 goto retry;
4052 }
4053 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4054 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4055 }
4056
4057 /*
4058 * Now that we have an rcu_barrier_callback() callback on each
4059 * CPU, and thus each counted, remove the initial count.
4060 */
4061 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4062 complete(&rcu_state.barrier_completion);
4063
4064 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4065 wait_for_completion(&rcu_state.barrier_completion);
4066
4067 /* Mark the end of the barrier operation. */
4068 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4069 rcu_seq_end(&rcu_state.barrier_sequence);
4070 gseq = rcu_state.barrier_sequence;
4071 for_each_possible_cpu(cpu) {
4072 rdp = per_cpu_ptr(&rcu_data, cpu);
4073
4074 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4075 }
4076
4077 /* Other rcu_barrier() invocations can now safely proceed. */
4078 mutex_unlock(&rcu_state.barrier_mutex);
4079}
4080EXPORT_SYMBOL_GPL(rcu_barrier);
4081
4082/*
4083 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4084 * first CPU in a given leaf rcu_node structure coming online. The caller
4085 * must hold the corresponding leaf rcu_node ->lock with interrupts
4086 * disabled.
4087 */
4088static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4089{
4090 long mask;
4091 long oldmask;
4092 struct rcu_node *rnp = rnp_leaf;
4093
4094 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4095 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4096 for (;;) {
4097 mask = rnp->grpmask;
4098 rnp = rnp->parent;
4099 if (rnp == NULL)
4100 return;
4101 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4102 oldmask = rnp->qsmaskinit;
4103 rnp->qsmaskinit |= mask;
4104 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4105 if (oldmask)
4106 return;
4107 }
4108}
4109
4110/*
4111 * Do boot-time initialization of a CPU's per-CPU RCU data.
4112 */
4113static void __init
4114rcu_boot_init_percpu_data(int cpu)
4115{
4116 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4117 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4118
4119 /* Set up local state, ensuring consistent view of global state. */
4120 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4121 INIT_WORK(&rdp->strict_work, strict_work_handler);
4122 WARN_ON_ONCE(ct->dynticks_nesting != 1);
4123 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4124 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4125 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4126 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4127 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4128 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4129 rdp->last_sched_clock = jiffies;
4130 rdp->cpu = cpu;
4131 rcu_boot_init_nocb_percpu_data(rdp);
4132}
4133
4134/*
4135 * Invoked early in the CPU-online process, when pretty much all services
4136 * are available. The incoming CPU is not present.
4137 *
4138 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4139 * offline event can be happening at a given time. Note also that we can
4140 * accept some slop in the rsp->gp_seq access due to the fact that this
4141 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4142 * And any offloaded callbacks are being numbered elsewhere.
4143 */
4144int rcutree_prepare_cpu(unsigned int cpu)
4145{
4146 unsigned long flags;
4147 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4148 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4149 struct rcu_node *rnp = rcu_get_root();
4150
4151 /* Set up local state, ensuring consistent view of global state. */
4152 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4153 rdp->qlen_last_fqs_check = 0;
4154 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4155 rdp->blimit = blimit;
4156 ct->dynticks_nesting = 1; /* CPU not up, no tearing. */
4157 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4158
4159 /*
4160 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4161 * (re-)initialized.
4162 */
4163 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4164 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4165
4166 /*
4167 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4168 * propagation up the rcu_node tree will happen at the beginning
4169 * of the next grace period.
4170 */
4171 rnp = rdp->mynode;
4172 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4173 rdp->beenonline = true; /* We have now been online. */
4174 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4175 rdp->gp_seq_needed = rdp->gp_seq;
4176 rdp->cpu_no_qs.b.norm = true;
4177 rdp->core_needs_qs = false;
4178 rdp->rcu_iw_pending = false;
4179 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4180 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4181 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4182 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4183 rcu_spawn_one_boost_kthread(rnp);
4184 rcu_spawn_cpu_nocb_kthread(cpu);
4185 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4186
4187 return 0;
4188}
4189
4190/*
4191 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4192 */
4193static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4194{
4195 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4196
4197 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4198}
4199
4200/*
4201 * Near the end of the CPU-online process. Pretty much all services
4202 * enabled, and the CPU is now very much alive.
4203 */
4204int rcutree_online_cpu(unsigned int cpu)
4205{
4206 unsigned long flags;
4207 struct rcu_data *rdp;
4208 struct rcu_node *rnp;
4209
4210 rdp = per_cpu_ptr(&rcu_data, cpu);
4211 rnp = rdp->mynode;
4212 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4213 rnp->ffmask |= rdp->grpmask;
4214 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4215 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4216 return 0; /* Too early in boot for scheduler work. */
4217 sync_sched_exp_online_cleanup(cpu);
4218 rcutree_affinity_setting(cpu, -1);
4219
4220 // Stop-machine done, so allow nohz_full to disable tick.
4221 tick_dep_clear(TICK_DEP_BIT_RCU);
4222 return 0;
4223}
4224
4225/*
4226 * Near the beginning of the process. The CPU is still very much alive
4227 * with pretty much all services enabled.
4228 */
4229int rcutree_offline_cpu(unsigned int cpu)
4230{
4231 unsigned long flags;
4232 struct rcu_data *rdp;
4233 struct rcu_node *rnp;
4234
4235 rdp = per_cpu_ptr(&rcu_data, cpu);
4236 rnp = rdp->mynode;
4237 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4238 rnp->ffmask &= ~rdp->grpmask;
4239 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4240
4241 rcutree_affinity_setting(cpu, cpu);
4242
4243 // nohz_full CPUs need the tick for stop-machine to work quickly
4244 tick_dep_set(TICK_DEP_BIT_RCU);
4245 return 0;
4246}
4247
4248/*
4249 * Mark the specified CPU as being online so that subsequent grace periods
4250 * (both expedited and normal) will wait on it. Note that this means that
4251 * incoming CPUs are not allowed to use RCU read-side critical sections
4252 * until this function is called. Failing to observe this restriction
4253 * will result in lockdep splats.
4254 *
4255 * Note that this function is special in that it is invoked directly
4256 * from the incoming CPU rather than from the cpuhp_step mechanism.
4257 * This is because this function must be invoked at a precise location.
4258 */
4259void rcu_cpu_starting(unsigned int cpu)
4260{
4261 unsigned long flags;
4262 unsigned long mask;
4263 struct rcu_data *rdp;
4264 struct rcu_node *rnp;
4265 bool newcpu;
4266
4267 rdp = per_cpu_ptr(&rcu_data, cpu);
4268 if (rdp->cpu_started)
4269 return;
4270 rdp->cpu_started = true;
4271
4272 rnp = rdp->mynode;
4273 mask = rdp->grpmask;
4274 local_irq_save(flags);
4275 arch_spin_lock(&rcu_state.ofl_lock);
4276 rcu_dynticks_eqs_online();
4277 raw_spin_lock(&rcu_state.barrier_lock);
4278 raw_spin_lock_rcu_node(rnp);
4279 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4280 raw_spin_unlock(&rcu_state.barrier_lock);
4281 newcpu = !(rnp->expmaskinitnext & mask);
4282 rnp->expmaskinitnext |= mask;
4283 /* Allow lockless access for expedited grace periods. */
4284 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4285 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4286 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4287 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4288 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4289
4290 /* An incoming CPU should never be blocking a grace period. */
4291 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4292 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4293 unsigned long flags2;
4294
4295 local_irq_save(flags2);
4296 rcu_disable_urgency_upon_qs(rdp);
4297 /* Report QS -after- changing ->qsmaskinitnext! */
4298 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
4299 } else {
4300 raw_spin_unlock_rcu_node(rnp);
4301 }
4302 arch_spin_unlock(&rcu_state.ofl_lock);
4303 local_irq_restore(flags);
4304 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4305}
4306
4307/*
4308 * The outgoing function has no further need of RCU, so remove it from
4309 * the rcu_node tree's ->qsmaskinitnext bit masks.
4310 *
4311 * Note that this function is special in that it is invoked directly
4312 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4313 * This is because this function must be invoked at a precise location.
4314 */
4315void rcu_report_dead(unsigned int cpu)
4316{
4317 unsigned long flags, seq_flags;
4318 unsigned long mask;
4319 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4320 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4321
4322 // Do any dangling deferred wakeups.
4323 do_nocb_deferred_wakeup(rdp);
4324
4325 rcu_preempt_deferred_qs(current);
4326
4327 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4328 mask = rdp->grpmask;
4329 local_irq_save(seq_flags);
4330 arch_spin_lock(&rcu_state.ofl_lock);
4331 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4332 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4333 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4334 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4335 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4336 rcu_disable_urgency_upon_qs(rdp);
4337 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4338 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4339 }
4340 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4341 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4342 arch_spin_unlock(&rcu_state.ofl_lock);
4343 local_irq_restore(seq_flags);
4344
4345 rdp->cpu_started = false;
4346}
4347
4348#ifdef CONFIG_HOTPLUG_CPU
4349/*
4350 * The outgoing CPU has just passed through the dying-idle state, and we
4351 * are being invoked from the CPU that was IPIed to continue the offline
4352 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4353 */
4354void rcutree_migrate_callbacks(int cpu)
4355{
4356 unsigned long flags;
4357 struct rcu_data *my_rdp;
4358 struct rcu_node *my_rnp;
4359 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4360 bool needwake;
4361
4362 if (rcu_rdp_is_offloaded(rdp) ||
4363 rcu_segcblist_empty(&rdp->cblist))
4364 return; /* No callbacks to migrate. */
4365
4366 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4367 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4368 rcu_barrier_entrain(rdp);
4369 my_rdp = this_cpu_ptr(&rcu_data);
4370 my_rnp = my_rdp->mynode;
4371 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4372 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4373 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4374 /* Leverage recent GPs and set GP for new callbacks. */
4375 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4376 rcu_advance_cbs(my_rnp, my_rdp);
4377 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4378 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4379 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4380 rcu_segcblist_disable(&rdp->cblist);
4381 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4382 check_cb_ovld_locked(my_rdp, my_rnp);
4383 if (rcu_rdp_is_offloaded(my_rdp)) {
4384 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4385 __call_rcu_nocb_wake(my_rdp, true, flags);
4386 } else {
4387 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4388 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4389 }
4390 if (needwake)
4391 rcu_gp_kthread_wake();
4392 lockdep_assert_irqs_enabled();
4393 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4394 !rcu_segcblist_empty(&rdp->cblist),
4395 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4396 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4397 rcu_segcblist_first_cb(&rdp->cblist));
4398}
4399#endif
4400
4401/*
4402 * On non-huge systems, use expedited RCU grace periods to make suspend
4403 * and hibernation run faster.
4404 */
4405static int rcu_pm_notify(struct notifier_block *self,
4406 unsigned long action, void *hcpu)
4407{
4408 switch (action) {
4409 case PM_HIBERNATION_PREPARE:
4410 case PM_SUSPEND_PREPARE:
4411 rcu_expedite_gp();
4412 break;
4413 case PM_POST_HIBERNATION:
4414 case PM_POST_SUSPEND:
4415 rcu_unexpedite_gp();
4416 break;
4417 default:
4418 break;
4419 }
4420 return NOTIFY_OK;
4421}
4422
4423#ifdef CONFIG_RCU_EXP_KTHREAD
4424struct kthread_worker *rcu_exp_gp_kworker;
4425struct kthread_worker *rcu_exp_par_gp_kworker;
4426
4427static void __init rcu_start_exp_gp_kworkers(void)
4428{
4429 const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4430 const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4431 struct sched_param param = { .sched_priority = kthread_prio };
4432
4433 rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4434 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4435 pr_err("Failed to create %s!\n", gp_kworker_name);
4436 return;
4437 }
4438
4439 rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4440 if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4441 pr_err("Failed to create %s!\n", par_gp_kworker_name);
4442 kthread_destroy_worker(rcu_exp_gp_kworker);
4443 return;
4444 }
4445
4446 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, ¶m);
4447 sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4448 ¶m);
4449}
4450
4451static inline void rcu_alloc_par_gp_wq(void)
4452{
4453}
4454#else /* !CONFIG_RCU_EXP_KTHREAD */
4455struct workqueue_struct *rcu_par_gp_wq;
4456
4457static void __init rcu_start_exp_gp_kworkers(void)
4458{
4459}
4460
4461static inline void rcu_alloc_par_gp_wq(void)
4462{
4463 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4464 WARN_ON(!rcu_par_gp_wq);
4465}
4466#endif /* CONFIG_RCU_EXP_KTHREAD */
4467
4468/*
4469 * Spawn the kthreads that handle RCU's grace periods.
4470 */
4471static int __init rcu_spawn_gp_kthread(void)
4472{
4473 unsigned long flags;
4474 struct rcu_node *rnp;
4475 struct sched_param sp;
4476 struct task_struct *t;
4477 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4478
4479 rcu_scheduler_fully_active = 1;
4480 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4481 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4482 return 0;
4483 if (kthread_prio) {
4484 sp.sched_priority = kthread_prio;
4485 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4486 }
4487 rnp = rcu_get_root();
4488 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4489 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4490 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4491 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4492 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4493 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4494 wake_up_process(t);
4495 /* This is a pre-SMP initcall, we expect a single CPU */
4496 WARN_ON(num_online_cpus() > 1);
4497 /*
4498 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
4499 * due to rcu_scheduler_fully_active.
4500 */
4501 rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4502 rcu_spawn_one_boost_kthread(rdp->mynode);
4503 rcu_spawn_core_kthreads();
4504 /* Create kthread worker for expedited GPs */
4505 rcu_start_exp_gp_kworkers();
4506 return 0;
4507}
4508early_initcall(rcu_spawn_gp_kthread);
4509
4510/*
4511 * This function is invoked towards the end of the scheduler's
4512 * initialization process. Before this is called, the idle task might
4513 * contain synchronous grace-period primitives (during which time, this idle
4514 * task is booting the system, and such primitives are no-ops). After this
4515 * function is called, any synchronous grace-period primitives are run as
4516 * expedited, with the requesting task driving the grace period forward.
4517 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4518 * runtime RCU functionality.
4519 */
4520void rcu_scheduler_starting(void)
4521{
4522 unsigned long flags;
4523 struct rcu_node *rnp;
4524
4525 WARN_ON(num_online_cpus() != 1);
4526 WARN_ON(nr_context_switches() > 0);
4527 rcu_test_sync_prims();
4528
4529 // Fix up the ->gp_seq counters.
4530 local_irq_save(flags);
4531 rcu_for_each_node_breadth_first(rnp)
4532 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4533 local_irq_restore(flags);
4534
4535 // Switch out of early boot mode.
4536 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4537 rcu_test_sync_prims();
4538}
4539
4540/*
4541 * Helper function for rcu_init() that initializes the rcu_state structure.
4542 */
4543static void __init rcu_init_one(void)
4544{
4545 static const char * const buf[] = RCU_NODE_NAME_INIT;
4546 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4547 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4548 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4549
4550 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4551 int cpustride = 1;
4552 int i;
4553 int j;
4554 struct rcu_node *rnp;
4555
4556 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4557
4558 /* Silence gcc 4.8 false positive about array index out of range. */
4559 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4560 panic("rcu_init_one: rcu_num_lvls out of range");
4561
4562 /* Initialize the level-tracking arrays. */
4563
4564 for (i = 1; i < rcu_num_lvls; i++)
4565 rcu_state.level[i] =
4566 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4567 rcu_init_levelspread(levelspread, num_rcu_lvl);
4568
4569 /* Initialize the elements themselves, starting from the leaves. */
4570
4571 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4572 cpustride *= levelspread[i];
4573 rnp = rcu_state.level[i];
4574 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4575 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4576 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4577 &rcu_node_class[i], buf[i]);
4578 raw_spin_lock_init(&rnp->fqslock);
4579 lockdep_set_class_and_name(&rnp->fqslock,
4580 &rcu_fqs_class[i], fqs[i]);
4581 rnp->gp_seq = rcu_state.gp_seq;
4582 rnp->gp_seq_needed = rcu_state.gp_seq;
4583 rnp->completedqs = rcu_state.gp_seq;
4584 rnp->qsmask = 0;
4585 rnp->qsmaskinit = 0;
4586 rnp->grplo = j * cpustride;
4587 rnp->grphi = (j + 1) * cpustride - 1;
4588 if (rnp->grphi >= nr_cpu_ids)
4589 rnp->grphi = nr_cpu_ids - 1;
4590 if (i == 0) {
4591 rnp->grpnum = 0;
4592 rnp->grpmask = 0;
4593 rnp->parent = NULL;
4594 } else {
4595 rnp->grpnum = j % levelspread[i - 1];
4596 rnp->grpmask = BIT(rnp->grpnum);
4597 rnp->parent = rcu_state.level[i - 1] +
4598 j / levelspread[i - 1];
4599 }
4600 rnp->level = i;
4601 INIT_LIST_HEAD(&rnp->blkd_tasks);
4602 rcu_init_one_nocb(rnp);
4603 init_waitqueue_head(&rnp->exp_wq[0]);
4604 init_waitqueue_head(&rnp->exp_wq[1]);
4605 init_waitqueue_head(&rnp->exp_wq[2]);
4606 init_waitqueue_head(&rnp->exp_wq[3]);
4607 spin_lock_init(&rnp->exp_lock);
4608 mutex_init(&rnp->boost_kthread_mutex);
4609 raw_spin_lock_init(&rnp->exp_poll_lock);
4610 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4611 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4612 }
4613 }
4614
4615 init_swait_queue_head(&rcu_state.gp_wq);
4616 init_swait_queue_head(&rcu_state.expedited_wq);
4617 rnp = rcu_first_leaf_node();
4618 for_each_possible_cpu(i) {
4619 while (i > rnp->grphi)
4620 rnp++;
4621 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4622 rcu_boot_init_percpu_data(i);
4623 }
4624}
4625
4626/*
4627 * Force priority from the kernel command-line into range.
4628 */
4629static void __init sanitize_kthread_prio(void)
4630{
4631 int kthread_prio_in = kthread_prio;
4632
4633 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4634 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4635 kthread_prio = 2;
4636 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4637 kthread_prio = 1;
4638 else if (kthread_prio < 0)
4639 kthread_prio = 0;
4640 else if (kthread_prio > 99)
4641 kthread_prio = 99;
4642
4643 if (kthread_prio != kthread_prio_in)
4644 pr_alert("%s: Limited prio to %d from %d\n",
4645 __func__, kthread_prio, kthread_prio_in);
4646}
4647
4648/*
4649 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4650 * replace the definitions in tree.h because those are needed to size
4651 * the ->node array in the rcu_state structure.
4652 */
4653void rcu_init_geometry(void)
4654{
4655 ulong d;
4656 int i;
4657 static unsigned long old_nr_cpu_ids;
4658 int rcu_capacity[RCU_NUM_LVLS];
4659 static bool initialized;
4660
4661 if (initialized) {
4662 /*
4663 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4664 * unless nr_cpus_ids == NR_CPUS, in which case who cares?
4665 */
4666 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4667 return;
4668 }
4669
4670 old_nr_cpu_ids = nr_cpu_ids;
4671 initialized = true;
4672
4673 /*
4674 * Initialize any unspecified boot parameters.
4675 * The default values of jiffies_till_first_fqs and
4676 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4677 * value, which is a function of HZ, then adding one for each
4678 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4679 */
4680 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4681 if (jiffies_till_first_fqs == ULONG_MAX)
4682 jiffies_till_first_fqs = d;
4683 if (jiffies_till_next_fqs == ULONG_MAX)
4684 jiffies_till_next_fqs = d;
4685 adjust_jiffies_till_sched_qs();
4686
4687 /* If the compile-time values are accurate, just leave. */
4688 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4689 nr_cpu_ids == NR_CPUS)
4690 return;
4691 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4692 rcu_fanout_leaf, nr_cpu_ids);
4693
4694 /*
4695 * The boot-time rcu_fanout_leaf parameter must be at least two
4696 * and cannot exceed the number of bits in the rcu_node masks.
4697 * Complain and fall back to the compile-time values if this
4698 * limit is exceeded.
4699 */
4700 if (rcu_fanout_leaf < 2 ||
4701 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4702 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4703 WARN_ON(1);
4704 return;
4705 }
4706
4707 /*
4708 * Compute number of nodes that can be handled an rcu_node tree
4709 * with the given number of levels.
4710 */
4711 rcu_capacity[0] = rcu_fanout_leaf;
4712 for (i = 1; i < RCU_NUM_LVLS; i++)
4713 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4714
4715 /*
4716 * The tree must be able to accommodate the configured number of CPUs.
4717 * If this limit is exceeded, fall back to the compile-time values.
4718 */
4719 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4720 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4721 WARN_ON(1);
4722 return;
4723 }
4724
4725 /* Calculate the number of levels in the tree. */
4726 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4727 }
4728 rcu_num_lvls = i + 1;
4729
4730 /* Calculate the number of rcu_nodes at each level of the tree. */
4731 for (i = 0; i < rcu_num_lvls; i++) {
4732 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4733 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4734 }
4735
4736 /* Calculate the total number of rcu_node structures. */
4737 rcu_num_nodes = 0;
4738 for (i = 0; i < rcu_num_lvls; i++)
4739 rcu_num_nodes += num_rcu_lvl[i];
4740}
4741
4742/*
4743 * Dump out the structure of the rcu_node combining tree associated
4744 * with the rcu_state structure.
4745 */
4746static void __init rcu_dump_rcu_node_tree(void)
4747{
4748 int level = 0;
4749 struct rcu_node *rnp;
4750
4751 pr_info("rcu_node tree layout dump\n");
4752 pr_info(" ");
4753 rcu_for_each_node_breadth_first(rnp) {
4754 if (rnp->level != level) {
4755 pr_cont("\n");
4756 pr_info(" ");
4757 level = rnp->level;
4758 }
4759 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4760 }
4761 pr_cont("\n");
4762}
4763
4764struct workqueue_struct *rcu_gp_wq;
4765
4766static void __init kfree_rcu_batch_init(void)
4767{
4768 int cpu;
4769 int i;
4770
4771 /* Clamp it to [0:100] seconds interval. */
4772 if (rcu_delay_page_cache_fill_msec < 0 ||
4773 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4774
4775 rcu_delay_page_cache_fill_msec =
4776 clamp(rcu_delay_page_cache_fill_msec, 0,
4777 (int) (100 * MSEC_PER_SEC));
4778
4779 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4780 rcu_delay_page_cache_fill_msec);
4781 }
4782
4783 for_each_possible_cpu(cpu) {
4784 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4785
4786 for (i = 0; i < KFREE_N_BATCHES; i++) {
4787 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4788 krcp->krw_arr[i].krcp = krcp;
4789 }
4790
4791 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4792 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4793 krcp->initialized = true;
4794 }
4795 if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
4796 pr_err("Failed to register kfree_rcu() shrinker!\n");
4797}
4798
4799void __init rcu_init(void)
4800{
4801 int cpu = smp_processor_id();
4802
4803 rcu_early_boot_tests();
4804
4805 kfree_rcu_batch_init();
4806 rcu_bootup_announce();
4807 sanitize_kthread_prio();
4808 rcu_init_geometry();
4809 rcu_init_one();
4810 if (dump_tree)
4811 rcu_dump_rcu_node_tree();
4812 if (use_softirq)
4813 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4814
4815 /*
4816 * We don't need protection against CPU-hotplug here because
4817 * this is called early in boot, before either interrupts
4818 * or the scheduler are operational.
4819 */
4820 pm_notifier(rcu_pm_notify, 0);
4821 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4822 rcutree_prepare_cpu(cpu);
4823 rcu_cpu_starting(cpu);
4824 rcutree_online_cpu(cpu);
4825
4826 /* Create workqueue for Tree SRCU and for expedited GPs. */
4827 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4828 WARN_ON(!rcu_gp_wq);
4829 rcu_alloc_par_gp_wq();
4830
4831 /* Fill in default value for rcutree.qovld boot parameter. */
4832 /* -After- the rcu_node ->lock fields are initialized! */
4833 if (qovld < 0)
4834 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4835 else
4836 qovld_calc = qovld;
4837
4838 // Kick-start any polled grace periods that started early.
4839 if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1))
4840 (void)start_poll_synchronize_rcu_expedited();
4841}
4842
4843#include "tree_stall.h"
4844#include "tree_exp.h"
4845#include "tree_nocb.h"
4846#include "tree_plugin.h"
1/*
2 * Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23 *
24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26 *
27 * For detailed explanation of Read-Copy Update mechanism see -
28 * Documentation/RCU
29 */
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp.h>
35#include <linux/rcupdate.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/nmi.h>
39#include <linux/atomic.h>
40#include <linux/bitops.h>
41#include <linux/export.h>
42#include <linux/completion.h>
43#include <linux/moduleparam.h>
44#include <linux/module.h>
45#include <linux/percpu.h>
46#include <linux/notifier.h>
47#include <linux/cpu.h>
48#include <linux/mutex.h>
49#include <linux/time.h>
50#include <linux/kernel_stat.h>
51#include <linux/wait.h>
52#include <linux/kthread.h>
53#include <linux/prefetch.h>
54#include <linux/delay.h>
55#include <linux/stop_machine.h>
56#include <linux/random.h>
57#include <linux/ftrace_event.h>
58#include <linux/suspend.h>
59
60#include "tree.h"
61#include "rcu.h"
62
63MODULE_ALIAS("rcutree");
64#ifdef MODULE_PARAM_PREFIX
65#undef MODULE_PARAM_PREFIX
66#endif
67#define MODULE_PARAM_PREFIX "rcutree."
68
69/* Data structures. */
70
71static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
72static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
73
74/*
75 * In order to export the rcu_state name to the tracing tools, it
76 * needs to be added in the __tracepoint_string section.
77 * This requires defining a separate variable tp_<sname>_varname
78 * that points to the string being used, and this will allow
79 * the tracing userspace tools to be able to decipher the string
80 * address to the matching string.
81 */
82#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
83static char sname##_varname[] = #sname; \
84static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
85struct rcu_state sname##_state = { \
86 .level = { &sname##_state.node[0] }, \
87 .call = cr, \
88 .fqs_state = RCU_GP_IDLE, \
89 .gpnum = 0UL - 300UL, \
90 .completed = 0UL - 300UL, \
91 .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
92 .orphan_nxttail = &sname##_state.orphan_nxtlist, \
93 .orphan_donetail = &sname##_state.orphan_donelist, \
94 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
95 .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
96 .name = sname##_varname, \
97 .abbr = sabbr, \
98}; \
99DEFINE_PER_CPU(struct rcu_data, sname##_data)
100
101RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
102RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
103
104static struct rcu_state *rcu_state;
105LIST_HEAD(rcu_struct_flavors);
106
107/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
108static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
109module_param(rcu_fanout_leaf, int, 0444);
110int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
111static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */
112 NUM_RCU_LVL_0,
113 NUM_RCU_LVL_1,
114 NUM_RCU_LVL_2,
115 NUM_RCU_LVL_3,
116 NUM_RCU_LVL_4,
117};
118int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
119
120/*
121 * The rcu_scheduler_active variable transitions from zero to one just
122 * before the first task is spawned. So when this variable is zero, RCU
123 * can assume that there is but one task, allowing RCU to (for example)
124 * optimize synchronize_sched() to a simple barrier(). When this variable
125 * is one, RCU must actually do all the hard work required to detect real
126 * grace periods. This variable is also used to suppress boot-time false
127 * positives from lockdep-RCU error checking.
128 */
129int rcu_scheduler_active __read_mostly;
130EXPORT_SYMBOL_GPL(rcu_scheduler_active);
131
132/*
133 * The rcu_scheduler_fully_active variable transitions from zero to one
134 * during the early_initcall() processing, which is after the scheduler
135 * is capable of creating new tasks. So RCU processing (for example,
136 * creating tasks for RCU priority boosting) must be delayed until after
137 * rcu_scheduler_fully_active transitions from zero to one. We also
138 * currently delay invocation of any RCU callbacks until after this point.
139 *
140 * It might later prove better for people registering RCU callbacks during
141 * early boot to take responsibility for these callbacks, but one step at
142 * a time.
143 */
144static int rcu_scheduler_fully_active __read_mostly;
145
146#ifdef CONFIG_RCU_BOOST
147
148/*
149 * Control variables for per-CPU and per-rcu_node kthreads. These
150 * handle all flavors of RCU.
151 */
152static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
153DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
154DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
155DEFINE_PER_CPU(char, rcu_cpu_has_work);
156
157#endif /* #ifdef CONFIG_RCU_BOOST */
158
159static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
160static void invoke_rcu_core(void);
161static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
162
163/*
164 * Track the rcutorture test sequence number and the update version
165 * number within a given test. The rcutorture_testseq is incremented
166 * on every rcutorture module load and unload, so has an odd value
167 * when a test is running. The rcutorture_vernum is set to zero
168 * when rcutorture starts and is incremented on each rcutorture update.
169 * These variables enable correlating rcutorture output with the
170 * RCU tracing information.
171 */
172unsigned long rcutorture_testseq;
173unsigned long rcutorture_vernum;
174
175/*
176 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
177 * permit this function to be invoked without holding the root rcu_node
178 * structure's ->lock, but of course results can be subject to change.
179 */
180static int rcu_gp_in_progress(struct rcu_state *rsp)
181{
182 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
183}
184
185/*
186 * Note a quiescent state. Because we do not need to know
187 * how many quiescent states passed, just if there was at least
188 * one since the start of the grace period, this just sets a flag.
189 * The caller must have disabled preemption.
190 */
191void rcu_sched_qs(int cpu)
192{
193 struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
194
195 if (rdp->passed_quiesce == 0)
196 trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
197 rdp->passed_quiesce = 1;
198}
199
200void rcu_bh_qs(int cpu)
201{
202 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
203
204 if (rdp->passed_quiesce == 0)
205 trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
206 rdp->passed_quiesce = 1;
207}
208
209/*
210 * Note a context switch. This is a quiescent state for RCU-sched,
211 * and requires special handling for preemptible RCU.
212 * The caller must have disabled preemption.
213 */
214void rcu_note_context_switch(int cpu)
215{
216 trace_rcu_utilization(TPS("Start context switch"));
217 rcu_sched_qs(cpu);
218 rcu_preempt_note_context_switch(cpu);
219 trace_rcu_utilization(TPS("End context switch"));
220}
221EXPORT_SYMBOL_GPL(rcu_note_context_switch);
222
223static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
224 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
225 .dynticks = ATOMIC_INIT(1),
226#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
227 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
228 .dynticks_idle = ATOMIC_INIT(1),
229#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
230};
231
232static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
233static long qhimark = 10000; /* If this many pending, ignore blimit. */
234static long qlowmark = 100; /* Once only this many pending, use blimit. */
235
236module_param(blimit, long, 0444);
237module_param(qhimark, long, 0444);
238module_param(qlowmark, long, 0444);
239
240static ulong jiffies_till_first_fqs = ULONG_MAX;
241static ulong jiffies_till_next_fqs = ULONG_MAX;
242
243module_param(jiffies_till_first_fqs, ulong, 0644);
244module_param(jiffies_till_next_fqs, ulong, 0644);
245
246static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
247 struct rcu_data *rdp);
248static void force_qs_rnp(struct rcu_state *rsp,
249 int (*f)(struct rcu_data *rsp, bool *isidle,
250 unsigned long *maxj),
251 bool *isidle, unsigned long *maxj);
252static void force_quiescent_state(struct rcu_state *rsp);
253static int rcu_pending(int cpu);
254
255/*
256 * Return the number of RCU-sched batches processed thus far for debug & stats.
257 */
258long rcu_batches_completed_sched(void)
259{
260 return rcu_sched_state.completed;
261}
262EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
263
264/*
265 * Return the number of RCU BH batches processed thus far for debug & stats.
266 */
267long rcu_batches_completed_bh(void)
268{
269 return rcu_bh_state.completed;
270}
271EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
272
273/*
274 * Force a quiescent state for RCU BH.
275 */
276void rcu_bh_force_quiescent_state(void)
277{
278 force_quiescent_state(&rcu_bh_state);
279}
280EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
281
282/*
283 * Record the number of times rcutorture tests have been initiated and
284 * terminated. This information allows the debugfs tracing stats to be
285 * correlated to the rcutorture messages, even when the rcutorture module
286 * is being repeatedly loaded and unloaded. In other words, we cannot
287 * store this state in rcutorture itself.
288 */
289void rcutorture_record_test_transition(void)
290{
291 rcutorture_testseq++;
292 rcutorture_vernum = 0;
293}
294EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
295
296/*
297 * Record the number of writer passes through the current rcutorture test.
298 * This is also used to correlate debugfs tracing stats with the rcutorture
299 * messages.
300 */
301void rcutorture_record_progress(unsigned long vernum)
302{
303 rcutorture_vernum++;
304}
305EXPORT_SYMBOL_GPL(rcutorture_record_progress);
306
307/*
308 * Force a quiescent state for RCU-sched.
309 */
310void rcu_sched_force_quiescent_state(void)
311{
312 force_quiescent_state(&rcu_sched_state);
313}
314EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
315
316/*
317 * Does the CPU have callbacks ready to be invoked?
318 */
319static int
320cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
321{
322 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
323 rdp->nxttail[RCU_DONE_TAIL] != NULL;
324}
325
326/*
327 * Does the current CPU require a not-yet-started grace period?
328 * The caller must have disabled interrupts to prevent races with
329 * normal callback registry.
330 */
331static int
332cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
333{
334 int i;
335
336 if (rcu_gp_in_progress(rsp))
337 return 0; /* No, a grace period is already in progress. */
338 if (rcu_nocb_needs_gp(rsp))
339 return 1; /* Yes, a no-CBs CPU needs one. */
340 if (!rdp->nxttail[RCU_NEXT_TAIL])
341 return 0; /* No, this is a no-CBs (or offline) CPU. */
342 if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
343 return 1; /* Yes, this CPU has newly registered callbacks. */
344 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
345 if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
346 ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
347 rdp->nxtcompleted[i]))
348 return 1; /* Yes, CBs for future grace period. */
349 return 0; /* No grace period needed. */
350}
351
352/*
353 * Return the root node of the specified rcu_state structure.
354 */
355static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
356{
357 return &rsp->node[0];
358}
359
360/*
361 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
362 *
363 * If the new value of the ->dynticks_nesting counter now is zero,
364 * we really have entered idle, and must do the appropriate accounting.
365 * The caller must have disabled interrupts.
366 */
367static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
368 bool user)
369{
370 struct rcu_state *rsp;
371 struct rcu_data *rdp;
372
373 trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
374 if (!user && !is_idle_task(current)) {
375 struct task_struct *idle __maybe_unused =
376 idle_task(smp_processor_id());
377
378 trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
379 ftrace_dump(DUMP_ORIG);
380 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
381 current->pid, current->comm,
382 idle->pid, idle->comm); /* must be idle task! */
383 }
384 for_each_rcu_flavor(rsp) {
385 rdp = this_cpu_ptr(rsp->rda);
386 do_nocb_deferred_wakeup(rdp);
387 }
388 rcu_prepare_for_idle(smp_processor_id());
389 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
390 smp_mb__before_atomic_inc(); /* See above. */
391 atomic_inc(&rdtp->dynticks);
392 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
393 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
394
395 /*
396 * It is illegal to enter an extended quiescent state while
397 * in an RCU read-side critical section.
398 */
399 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
400 "Illegal idle entry in RCU read-side critical section.");
401 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
402 "Illegal idle entry in RCU-bh read-side critical section.");
403 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
404 "Illegal idle entry in RCU-sched read-side critical section.");
405}
406
407/*
408 * Enter an RCU extended quiescent state, which can be either the
409 * idle loop or adaptive-tickless usermode execution.
410 */
411static void rcu_eqs_enter(bool user)
412{
413 long long oldval;
414 struct rcu_dynticks *rdtp;
415
416 rdtp = this_cpu_ptr(&rcu_dynticks);
417 oldval = rdtp->dynticks_nesting;
418 WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
419 if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
420 rdtp->dynticks_nesting = 0;
421 rcu_eqs_enter_common(rdtp, oldval, user);
422 } else {
423 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
424 }
425}
426
427/**
428 * rcu_idle_enter - inform RCU that current CPU is entering idle
429 *
430 * Enter idle mode, in other words, -leave- the mode in which RCU
431 * read-side critical sections can occur. (Though RCU read-side
432 * critical sections can occur in irq handlers in idle, a possibility
433 * handled by irq_enter() and irq_exit().)
434 *
435 * We crowbar the ->dynticks_nesting field to zero to allow for
436 * the possibility of usermode upcalls having messed up our count
437 * of interrupt nesting level during the prior busy period.
438 */
439void rcu_idle_enter(void)
440{
441 unsigned long flags;
442
443 local_irq_save(flags);
444 rcu_eqs_enter(false);
445 rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
446 local_irq_restore(flags);
447}
448EXPORT_SYMBOL_GPL(rcu_idle_enter);
449
450#ifdef CONFIG_RCU_USER_QS
451/**
452 * rcu_user_enter - inform RCU that we are resuming userspace.
453 *
454 * Enter RCU idle mode right before resuming userspace. No use of RCU
455 * is permitted between this call and rcu_user_exit(). This way the
456 * CPU doesn't need to maintain the tick for RCU maintenance purposes
457 * when the CPU runs in userspace.
458 */
459void rcu_user_enter(void)
460{
461 rcu_eqs_enter(1);
462}
463#endif /* CONFIG_RCU_USER_QS */
464
465/**
466 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
467 *
468 * Exit from an interrupt handler, which might possibly result in entering
469 * idle mode, in other words, leaving the mode in which read-side critical
470 * sections can occur.
471 *
472 * This code assumes that the idle loop never does anything that might
473 * result in unbalanced calls to irq_enter() and irq_exit(). If your
474 * architecture violates this assumption, RCU will give you what you
475 * deserve, good and hard. But very infrequently and irreproducibly.
476 *
477 * Use things like work queues to work around this limitation.
478 *
479 * You have been warned.
480 */
481void rcu_irq_exit(void)
482{
483 unsigned long flags;
484 long long oldval;
485 struct rcu_dynticks *rdtp;
486
487 local_irq_save(flags);
488 rdtp = this_cpu_ptr(&rcu_dynticks);
489 oldval = rdtp->dynticks_nesting;
490 rdtp->dynticks_nesting--;
491 WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
492 if (rdtp->dynticks_nesting)
493 trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
494 else
495 rcu_eqs_enter_common(rdtp, oldval, true);
496 rcu_sysidle_enter(rdtp, 1);
497 local_irq_restore(flags);
498}
499
500/*
501 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
502 *
503 * If the new value of the ->dynticks_nesting counter was previously zero,
504 * we really have exited idle, and must do the appropriate accounting.
505 * The caller must have disabled interrupts.
506 */
507static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
508 int user)
509{
510 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
511 atomic_inc(&rdtp->dynticks);
512 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
513 smp_mb__after_atomic_inc(); /* See above. */
514 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
515 rcu_cleanup_after_idle(smp_processor_id());
516 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
517 if (!user && !is_idle_task(current)) {
518 struct task_struct *idle __maybe_unused =
519 idle_task(smp_processor_id());
520
521 trace_rcu_dyntick(TPS("Error on exit: not idle task"),
522 oldval, rdtp->dynticks_nesting);
523 ftrace_dump(DUMP_ORIG);
524 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
525 current->pid, current->comm,
526 idle->pid, idle->comm); /* must be idle task! */
527 }
528}
529
530/*
531 * Exit an RCU extended quiescent state, which can be either the
532 * idle loop or adaptive-tickless usermode execution.
533 */
534static void rcu_eqs_exit(bool user)
535{
536 struct rcu_dynticks *rdtp;
537 long long oldval;
538
539 rdtp = this_cpu_ptr(&rcu_dynticks);
540 oldval = rdtp->dynticks_nesting;
541 WARN_ON_ONCE(oldval < 0);
542 if (oldval & DYNTICK_TASK_NEST_MASK) {
543 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
544 } else {
545 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
546 rcu_eqs_exit_common(rdtp, oldval, user);
547 }
548}
549
550/**
551 * rcu_idle_exit - inform RCU that current CPU is leaving idle
552 *
553 * Exit idle mode, in other words, -enter- the mode in which RCU
554 * read-side critical sections can occur.
555 *
556 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
557 * allow for the possibility of usermode upcalls messing up our count
558 * of interrupt nesting level during the busy period that is just
559 * now starting.
560 */
561void rcu_idle_exit(void)
562{
563 unsigned long flags;
564
565 local_irq_save(flags);
566 rcu_eqs_exit(false);
567 rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
568 local_irq_restore(flags);
569}
570EXPORT_SYMBOL_GPL(rcu_idle_exit);
571
572#ifdef CONFIG_RCU_USER_QS
573/**
574 * rcu_user_exit - inform RCU that we are exiting userspace.
575 *
576 * Exit RCU idle mode while entering the kernel because it can
577 * run a RCU read side critical section anytime.
578 */
579void rcu_user_exit(void)
580{
581 rcu_eqs_exit(1);
582}
583#endif /* CONFIG_RCU_USER_QS */
584
585/**
586 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
587 *
588 * Enter an interrupt handler, which might possibly result in exiting
589 * idle mode, in other words, entering the mode in which read-side critical
590 * sections can occur.
591 *
592 * Note that the Linux kernel is fully capable of entering an interrupt
593 * handler that it never exits, for example when doing upcalls to
594 * user mode! This code assumes that the idle loop never does upcalls to
595 * user mode. If your architecture does do upcalls from the idle loop (or
596 * does anything else that results in unbalanced calls to the irq_enter()
597 * and irq_exit() functions), RCU will give you what you deserve, good
598 * and hard. But very infrequently and irreproducibly.
599 *
600 * Use things like work queues to work around this limitation.
601 *
602 * You have been warned.
603 */
604void rcu_irq_enter(void)
605{
606 unsigned long flags;
607 struct rcu_dynticks *rdtp;
608 long long oldval;
609
610 local_irq_save(flags);
611 rdtp = this_cpu_ptr(&rcu_dynticks);
612 oldval = rdtp->dynticks_nesting;
613 rdtp->dynticks_nesting++;
614 WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
615 if (oldval)
616 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
617 else
618 rcu_eqs_exit_common(rdtp, oldval, true);
619 rcu_sysidle_exit(rdtp, 1);
620 local_irq_restore(flags);
621}
622
623/**
624 * rcu_nmi_enter - inform RCU of entry to NMI context
625 *
626 * If the CPU was idle with dynamic ticks active, and there is no
627 * irq handler running, this updates rdtp->dynticks_nmi to let the
628 * RCU grace-period handling know that the CPU is active.
629 */
630void rcu_nmi_enter(void)
631{
632 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
633
634 if (rdtp->dynticks_nmi_nesting == 0 &&
635 (atomic_read(&rdtp->dynticks) & 0x1))
636 return;
637 rdtp->dynticks_nmi_nesting++;
638 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
639 atomic_inc(&rdtp->dynticks);
640 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
641 smp_mb__after_atomic_inc(); /* See above. */
642 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
643}
644
645/**
646 * rcu_nmi_exit - inform RCU of exit from NMI context
647 *
648 * If the CPU was idle with dynamic ticks active, and there is no
649 * irq handler running, this updates rdtp->dynticks_nmi to let the
650 * RCU grace-period handling know that the CPU is no longer active.
651 */
652void rcu_nmi_exit(void)
653{
654 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
655
656 if (rdtp->dynticks_nmi_nesting == 0 ||
657 --rdtp->dynticks_nmi_nesting != 0)
658 return;
659 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
660 smp_mb__before_atomic_inc(); /* See above. */
661 atomic_inc(&rdtp->dynticks);
662 smp_mb__after_atomic_inc(); /* Force delay to next write. */
663 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
664}
665
666/**
667 * __rcu_is_watching - are RCU read-side critical sections safe?
668 *
669 * Return true if RCU is watching the running CPU, which means that
670 * this CPU can safely enter RCU read-side critical sections. Unlike
671 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
672 * least disabled preemption.
673 */
674bool notrace __rcu_is_watching(void)
675{
676 return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
677}
678
679/**
680 * rcu_is_watching - see if RCU thinks that the current CPU is idle
681 *
682 * If the current CPU is in its idle loop and is neither in an interrupt
683 * or NMI handler, return true.
684 */
685bool notrace rcu_is_watching(void)
686{
687 int ret;
688
689 preempt_disable();
690 ret = __rcu_is_watching();
691 preempt_enable();
692 return ret;
693}
694EXPORT_SYMBOL_GPL(rcu_is_watching);
695
696#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
697
698/*
699 * Is the current CPU online? Disable preemption to avoid false positives
700 * that could otherwise happen due to the current CPU number being sampled,
701 * this task being preempted, its old CPU being taken offline, resuming
702 * on some other CPU, then determining that its old CPU is now offline.
703 * It is OK to use RCU on an offline processor during initial boot, hence
704 * the check for rcu_scheduler_fully_active. Note also that it is OK
705 * for a CPU coming online to use RCU for one jiffy prior to marking itself
706 * online in the cpu_online_mask. Similarly, it is OK for a CPU going
707 * offline to continue to use RCU for one jiffy after marking itself
708 * offline in the cpu_online_mask. This leniency is necessary given the
709 * non-atomic nature of the online and offline processing, for example,
710 * the fact that a CPU enters the scheduler after completing the CPU_DYING
711 * notifiers.
712 *
713 * This is also why RCU internally marks CPUs online during the
714 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
715 *
716 * Disable checking if in an NMI handler because we cannot safely report
717 * errors from NMI handlers anyway.
718 */
719bool rcu_lockdep_current_cpu_online(void)
720{
721 struct rcu_data *rdp;
722 struct rcu_node *rnp;
723 bool ret;
724
725 if (in_nmi())
726 return true;
727 preempt_disable();
728 rdp = this_cpu_ptr(&rcu_sched_data);
729 rnp = rdp->mynode;
730 ret = (rdp->grpmask & rnp->qsmaskinit) ||
731 !rcu_scheduler_fully_active;
732 preempt_enable();
733 return ret;
734}
735EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
736
737#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
738
739/**
740 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
741 *
742 * If the current CPU is idle or running at a first-level (not nested)
743 * interrupt from idle, return true. The caller must have at least
744 * disabled preemption.
745 */
746static int rcu_is_cpu_rrupt_from_idle(void)
747{
748 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
749}
750
751/*
752 * Snapshot the specified CPU's dynticks counter so that we can later
753 * credit them with an implicit quiescent state. Return 1 if this CPU
754 * is in dynticks idle mode, which is an extended quiescent state.
755 */
756static int dyntick_save_progress_counter(struct rcu_data *rdp,
757 bool *isidle, unsigned long *maxj)
758{
759 rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
760 rcu_sysidle_check_cpu(rdp, isidle, maxj);
761 return (rdp->dynticks_snap & 0x1) == 0;
762}
763
764/*
765 * This function really isn't for public consumption, but RCU is special in
766 * that context switches can allow the state machine to make progress.
767 */
768extern void resched_cpu(int cpu);
769
770/*
771 * Return true if the specified CPU has passed through a quiescent
772 * state by virtue of being in or having passed through an dynticks
773 * idle state since the last call to dyntick_save_progress_counter()
774 * for this same CPU, or by virtue of having been offline.
775 */
776static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
777 bool *isidle, unsigned long *maxj)
778{
779 unsigned int curr;
780 unsigned int snap;
781
782 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
783 snap = (unsigned int)rdp->dynticks_snap;
784
785 /*
786 * If the CPU passed through or entered a dynticks idle phase with
787 * no active irq/NMI handlers, then we can safely pretend that the CPU
788 * already acknowledged the request to pass through a quiescent
789 * state. Either way, that CPU cannot possibly be in an RCU
790 * read-side critical section that started before the beginning
791 * of the current RCU grace period.
792 */
793 if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
794 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
795 rdp->dynticks_fqs++;
796 return 1;
797 }
798
799 /*
800 * Check for the CPU being offline, but only if the grace period
801 * is old enough. We don't need to worry about the CPU changing
802 * state: If we see it offline even once, it has been through a
803 * quiescent state.
804 *
805 * The reason for insisting that the grace period be at least
806 * one jiffy old is that CPUs that are not quite online and that
807 * have just gone offline can still execute RCU read-side critical
808 * sections.
809 */
810 if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
811 return 0; /* Grace period is not old enough. */
812 barrier();
813 if (cpu_is_offline(rdp->cpu)) {
814 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
815 rdp->offline_fqs++;
816 return 1;
817 }
818
819 /*
820 * There is a possibility that a CPU in adaptive-ticks state
821 * might run in the kernel with the scheduling-clock tick disabled
822 * for an extended time period. Invoke rcu_kick_nohz_cpu() to
823 * force the CPU to restart the scheduling-clock tick in this
824 * CPU is in this state.
825 */
826 rcu_kick_nohz_cpu(rdp->cpu);
827
828 /*
829 * Alternatively, the CPU might be running in the kernel
830 * for an extended period of time without a quiescent state.
831 * Attempt to force the CPU through the scheduler to gain the
832 * needed quiescent state, but only if the grace period has gone
833 * on for an uncommonly long time. If there are many stuck CPUs,
834 * we will beat on the first one until it gets unstuck, then move
835 * to the next. Only do this for the primary flavor of RCU.
836 */
837 if (rdp->rsp == rcu_state &&
838 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
839 rdp->rsp->jiffies_resched += 5;
840 resched_cpu(rdp->cpu);
841 }
842
843 return 0;
844}
845
846static void record_gp_stall_check_time(struct rcu_state *rsp)
847{
848 unsigned long j = jiffies;
849 unsigned long j1;
850
851 rsp->gp_start = j;
852 smp_wmb(); /* Record start time before stall time. */
853 j1 = rcu_jiffies_till_stall_check();
854 rsp->jiffies_stall = j + j1;
855 rsp->jiffies_resched = j + j1 / 2;
856}
857
858/*
859 * Dump stacks of all tasks running on stalled CPUs. This is a fallback
860 * for architectures that do not implement trigger_all_cpu_backtrace().
861 * The NMI-triggered stack traces are more accurate because they are
862 * printed by the target CPU.
863 */
864static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
865{
866 int cpu;
867 unsigned long flags;
868 struct rcu_node *rnp;
869
870 rcu_for_each_leaf_node(rsp, rnp) {
871 raw_spin_lock_irqsave(&rnp->lock, flags);
872 if (rnp->qsmask != 0) {
873 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
874 if (rnp->qsmask & (1UL << cpu))
875 dump_cpu_task(rnp->grplo + cpu);
876 }
877 raw_spin_unlock_irqrestore(&rnp->lock, flags);
878 }
879}
880
881static void print_other_cpu_stall(struct rcu_state *rsp)
882{
883 int cpu;
884 long delta;
885 unsigned long flags;
886 int ndetected = 0;
887 struct rcu_node *rnp = rcu_get_root(rsp);
888 long totqlen = 0;
889
890 /* Only let one CPU complain about others per time interval. */
891
892 raw_spin_lock_irqsave(&rnp->lock, flags);
893 delta = jiffies - rsp->jiffies_stall;
894 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
895 raw_spin_unlock_irqrestore(&rnp->lock, flags);
896 return;
897 }
898 rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
899 raw_spin_unlock_irqrestore(&rnp->lock, flags);
900
901 /*
902 * OK, time to rat on our buddy...
903 * See Documentation/RCU/stallwarn.txt for info on how to debug
904 * RCU CPU stall warnings.
905 */
906 pr_err("INFO: %s detected stalls on CPUs/tasks:",
907 rsp->name);
908 print_cpu_stall_info_begin();
909 rcu_for_each_leaf_node(rsp, rnp) {
910 raw_spin_lock_irqsave(&rnp->lock, flags);
911 ndetected += rcu_print_task_stall(rnp);
912 if (rnp->qsmask != 0) {
913 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
914 if (rnp->qsmask & (1UL << cpu)) {
915 print_cpu_stall_info(rsp,
916 rnp->grplo + cpu);
917 ndetected++;
918 }
919 }
920 raw_spin_unlock_irqrestore(&rnp->lock, flags);
921 }
922
923 /*
924 * Now rat on any tasks that got kicked up to the root rcu_node
925 * due to CPU offlining.
926 */
927 rnp = rcu_get_root(rsp);
928 raw_spin_lock_irqsave(&rnp->lock, flags);
929 ndetected += rcu_print_task_stall(rnp);
930 raw_spin_unlock_irqrestore(&rnp->lock, flags);
931
932 print_cpu_stall_info_end();
933 for_each_possible_cpu(cpu)
934 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
935 pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
936 smp_processor_id(), (long)(jiffies - rsp->gp_start),
937 rsp->gpnum, rsp->completed, totqlen);
938 if (ndetected == 0)
939 pr_err("INFO: Stall ended before state dump start\n");
940 else if (!trigger_all_cpu_backtrace())
941 rcu_dump_cpu_stacks(rsp);
942
943 /* Complain about tasks blocking the grace period. */
944
945 rcu_print_detail_task_stall(rsp);
946
947 force_quiescent_state(rsp); /* Kick them all. */
948}
949
950/*
951 * This function really isn't for public consumption, but RCU is special in
952 * that context switches can allow the state machine to make progress.
953 */
954extern void resched_cpu(int cpu);
955
956static void print_cpu_stall(struct rcu_state *rsp)
957{
958 int cpu;
959 unsigned long flags;
960 struct rcu_node *rnp = rcu_get_root(rsp);
961 long totqlen = 0;
962
963 /*
964 * OK, time to rat on ourselves...
965 * See Documentation/RCU/stallwarn.txt for info on how to debug
966 * RCU CPU stall warnings.
967 */
968 pr_err("INFO: %s self-detected stall on CPU", rsp->name);
969 print_cpu_stall_info_begin();
970 print_cpu_stall_info(rsp, smp_processor_id());
971 print_cpu_stall_info_end();
972 for_each_possible_cpu(cpu)
973 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
974 pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
975 jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
976 if (!trigger_all_cpu_backtrace())
977 dump_stack();
978
979 raw_spin_lock_irqsave(&rnp->lock, flags);
980 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
981 rsp->jiffies_stall = jiffies +
982 3 * rcu_jiffies_till_stall_check() + 3;
983 raw_spin_unlock_irqrestore(&rnp->lock, flags);
984
985 /*
986 * Attempt to revive the RCU machinery by forcing a context switch.
987 *
988 * A context switch would normally allow the RCU state machine to make
989 * progress and it could be we're stuck in kernel space without context
990 * switches for an entirely unreasonable amount of time.
991 */
992 resched_cpu(smp_processor_id());
993}
994
995static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
996{
997 unsigned long completed;
998 unsigned long gpnum;
999 unsigned long gps;
1000 unsigned long j;
1001 unsigned long js;
1002 struct rcu_node *rnp;
1003
1004 if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1005 return;
1006 j = jiffies;
1007
1008 /*
1009 * Lots of memory barriers to reject false positives.
1010 *
1011 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1012 * then rsp->gp_start, and finally rsp->completed. These values
1013 * are updated in the opposite order with memory barriers (or
1014 * equivalent) during grace-period initialization and cleanup.
1015 * Now, a false positive can occur if we get an new value of
1016 * rsp->gp_start and a old value of rsp->jiffies_stall. But given
1017 * the memory barriers, the only way that this can happen is if one
1018 * grace period ends and another starts between these two fetches.
1019 * Detect this by comparing rsp->completed with the previous fetch
1020 * from rsp->gpnum.
1021 *
1022 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1023 * and rsp->gp_start suffice to forestall false positives.
1024 */
1025 gpnum = ACCESS_ONCE(rsp->gpnum);
1026 smp_rmb(); /* Pick up ->gpnum first... */
1027 js = ACCESS_ONCE(rsp->jiffies_stall);
1028 smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1029 gps = ACCESS_ONCE(rsp->gp_start);
1030 smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1031 completed = ACCESS_ONCE(rsp->completed);
1032 if (ULONG_CMP_GE(completed, gpnum) ||
1033 ULONG_CMP_LT(j, js) ||
1034 ULONG_CMP_GE(gps, js))
1035 return; /* No stall or GP completed since entering function. */
1036 rnp = rdp->mynode;
1037 if (rcu_gp_in_progress(rsp) &&
1038 (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
1039
1040 /* We haven't checked in, so go dump stack. */
1041 print_cpu_stall(rsp);
1042
1043 } else if (rcu_gp_in_progress(rsp) &&
1044 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1045
1046 /* They had a few time units to dump stack, so complain. */
1047 print_other_cpu_stall(rsp);
1048 }
1049}
1050
1051/**
1052 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
1053 *
1054 * Set the stall-warning timeout way off into the future, thus preventing
1055 * any RCU CPU stall-warning messages from appearing in the current set of
1056 * RCU grace periods.
1057 *
1058 * The caller must disable hard irqs.
1059 */
1060void rcu_cpu_stall_reset(void)
1061{
1062 struct rcu_state *rsp;
1063
1064 for_each_rcu_flavor(rsp)
1065 rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
1066}
1067
1068/*
1069 * Initialize the specified rcu_data structure's callback list to empty.
1070 */
1071static void init_callback_list(struct rcu_data *rdp)
1072{
1073 int i;
1074
1075 if (init_nocb_callback_list(rdp))
1076 return;
1077 rdp->nxtlist = NULL;
1078 for (i = 0; i < RCU_NEXT_SIZE; i++)
1079 rdp->nxttail[i] = &rdp->nxtlist;
1080}
1081
1082/*
1083 * Determine the value that ->completed will have at the end of the
1084 * next subsequent grace period. This is used to tag callbacks so that
1085 * a CPU can invoke callbacks in a timely fashion even if that CPU has
1086 * been dyntick-idle for an extended period with callbacks under the
1087 * influence of RCU_FAST_NO_HZ.
1088 *
1089 * The caller must hold rnp->lock with interrupts disabled.
1090 */
1091static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1092 struct rcu_node *rnp)
1093{
1094 /*
1095 * If RCU is idle, we just wait for the next grace period.
1096 * But we can only be sure that RCU is idle if we are looking
1097 * at the root rcu_node structure -- otherwise, a new grace
1098 * period might have started, but just not yet gotten around
1099 * to initializing the current non-root rcu_node structure.
1100 */
1101 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1102 return rnp->completed + 1;
1103
1104 /*
1105 * Otherwise, wait for a possible partial grace period and
1106 * then the subsequent full grace period.
1107 */
1108 return rnp->completed + 2;
1109}
1110
1111/*
1112 * Trace-event helper function for rcu_start_future_gp() and
1113 * rcu_nocb_wait_gp().
1114 */
1115static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1116 unsigned long c, const char *s)
1117{
1118 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1119 rnp->completed, c, rnp->level,
1120 rnp->grplo, rnp->grphi, s);
1121}
1122
1123/*
1124 * Start some future grace period, as needed to handle newly arrived
1125 * callbacks. The required future grace periods are recorded in each
1126 * rcu_node structure's ->need_future_gp field.
1127 *
1128 * The caller must hold the specified rcu_node structure's ->lock.
1129 */
1130static unsigned long __maybe_unused
1131rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
1132{
1133 unsigned long c;
1134 int i;
1135 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1136
1137 /*
1138 * Pick up grace-period number for new callbacks. If this
1139 * grace period is already marked as needed, return to the caller.
1140 */
1141 c = rcu_cbs_completed(rdp->rsp, rnp);
1142 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1143 if (rnp->need_future_gp[c & 0x1]) {
1144 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1145 return c;
1146 }
1147
1148 /*
1149 * If either this rcu_node structure or the root rcu_node structure
1150 * believe that a grace period is in progress, then we must wait
1151 * for the one following, which is in "c". Because our request
1152 * will be noticed at the end of the current grace period, we don't
1153 * need to explicitly start one.
1154 */
1155 if (rnp->gpnum != rnp->completed ||
1156 ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
1157 rnp->need_future_gp[c & 0x1]++;
1158 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1159 return c;
1160 }
1161
1162 /*
1163 * There might be no grace period in progress. If we don't already
1164 * hold it, acquire the root rcu_node structure's lock in order to
1165 * start one (if needed).
1166 */
1167 if (rnp != rnp_root) {
1168 raw_spin_lock(&rnp_root->lock);
1169 smp_mb__after_unlock_lock();
1170 }
1171
1172 /*
1173 * Get a new grace-period number. If there really is no grace
1174 * period in progress, it will be smaller than the one we obtained
1175 * earlier. Adjust callbacks as needed. Note that even no-CBs
1176 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1177 */
1178 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1179 for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1180 if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1181 rdp->nxtcompleted[i] = c;
1182
1183 /*
1184 * If the needed for the required grace period is already
1185 * recorded, trace and leave.
1186 */
1187 if (rnp_root->need_future_gp[c & 0x1]) {
1188 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1189 goto unlock_out;
1190 }
1191
1192 /* Record the need for the future grace period. */
1193 rnp_root->need_future_gp[c & 0x1]++;
1194
1195 /* If a grace period is not already in progress, start one. */
1196 if (rnp_root->gpnum != rnp_root->completed) {
1197 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1198 } else {
1199 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1200 rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1201 }
1202unlock_out:
1203 if (rnp != rnp_root)
1204 raw_spin_unlock(&rnp_root->lock);
1205 return c;
1206}
1207
1208/*
1209 * Clean up any old requests for the just-ended grace period. Also return
1210 * whether any additional grace periods have been requested. Also invoke
1211 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1212 * waiting for this grace period to complete.
1213 */
1214static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1215{
1216 int c = rnp->completed;
1217 int needmore;
1218 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1219
1220 rcu_nocb_gp_cleanup(rsp, rnp);
1221 rnp->need_future_gp[c & 0x1] = 0;
1222 needmore = rnp->need_future_gp[(c + 1) & 0x1];
1223 trace_rcu_future_gp(rnp, rdp, c,
1224 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1225 return needmore;
1226}
1227
1228/*
1229 * If there is room, assign a ->completed number to any callbacks on
1230 * this CPU that have not already been assigned. Also accelerate any
1231 * callbacks that were previously assigned a ->completed number that has
1232 * since proven to be too conservative, which can happen if callbacks get
1233 * assigned a ->completed number while RCU is idle, but with reference to
1234 * a non-root rcu_node structure. This function is idempotent, so it does
1235 * not hurt to call it repeatedly.
1236 *
1237 * The caller must hold rnp->lock with interrupts disabled.
1238 */
1239static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1240 struct rcu_data *rdp)
1241{
1242 unsigned long c;
1243 int i;
1244
1245 /* If the CPU has no callbacks, nothing to do. */
1246 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1247 return;
1248
1249 /*
1250 * Starting from the sublist containing the callbacks most
1251 * recently assigned a ->completed number and working down, find the
1252 * first sublist that is not assignable to an upcoming grace period.
1253 * Such a sublist has something in it (first two tests) and has
1254 * a ->completed number assigned that will complete sooner than
1255 * the ->completed number for newly arrived callbacks (last test).
1256 *
1257 * The key point is that any later sublist can be assigned the
1258 * same ->completed number as the newly arrived callbacks, which
1259 * means that the callbacks in any of these later sublist can be
1260 * grouped into a single sublist, whether or not they have already
1261 * been assigned a ->completed number.
1262 */
1263 c = rcu_cbs_completed(rsp, rnp);
1264 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1265 if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1266 !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1267 break;
1268
1269 /*
1270 * If there are no sublist for unassigned callbacks, leave.
1271 * At the same time, advance "i" one sublist, so that "i" will
1272 * index into the sublist where all the remaining callbacks should
1273 * be grouped into.
1274 */
1275 if (++i >= RCU_NEXT_TAIL)
1276 return;
1277
1278 /*
1279 * Assign all subsequent callbacks' ->completed number to the next
1280 * full grace period and group them all in the sublist initially
1281 * indexed by "i".
1282 */
1283 for (; i <= RCU_NEXT_TAIL; i++) {
1284 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1285 rdp->nxtcompleted[i] = c;
1286 }
1287 /* Record any needed additional grace periods. */
1288 rcu_start_future_gp(rnp, rdp);
1289
1290 /* Trace depending on how much we were able to accelerate. */
1291 if (!*rdp->nxttail[RCU_WAIT_TAIL])
1292 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1293 else
1294 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1295}
1296
1297/*
1298 * Move any callbacks whose grace period has completed to the
1299 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1300 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1301 * sublist. This function is idempotent, so it does not hurt to
1302 * invoke it repeatedly. As long as it is not invoked -too- often...
1303 *
1304 * The caller must hold rnp->lock with interrupts disabled.
1305 */
1306static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1307 struct rcu_data *rdp)
1308{
1309 int i, j;
1310
1311 /* If the CPU has no callbacks, nothing to do. */
1312 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1313 return;
1314
1315 /*
1316 * Find all callbacks whose ->completed numbers indicate that they
1317 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1318 */
1319 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1320 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1321 break;
1322 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1323 }
1324 /* Clean up any sublist tail pointers that were misordered above. */
1325 for (j = RCU_WAIT_TAIL; j < i; j++)
1326 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1327
1328 /* Copy down callbacks to fill in empty sublists. */
1329 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1330 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1331 break;
1332 rdp->nxttail[j] = rdp->nxttail[i];
1333 rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1334 }
1335
1336 /* Classify any remaining callbacks. */
1337 rcu_accelerate_cbs(rsp, rnp, rdp);
1338}
1339
1340/*
1341 * Update CPU-local rcu_data state to record the beginnings and ends of
1342 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1343 * structure corresponding to the current CPU, and must have irqs disabled.
1344 */
1345static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
1346{
1347 /* Handle the ends of any preceding grace periods first. */
1348 if (rdp->completed == rnp->completed) {
1349
1350 /* No grace period end, so just accelerate recent callbacks. */
1351 rcu_accelerate_cbs(rsp, rnp, rdp);
1352
1353 } else {
1354
1355 /* Advance callbacks. */
1356 rcu_advance_cbs(rsp, rnp, rdp);
1357
1358 /* Remember that we saw this grace-period completion. */
1359 rdp->completed = rnp->completed;
1360 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1361 }
1362
1363 if (rdp->gpnum != rnp->gpnum) {
1364 /*
1365 * If the current grace period is waiting for this CPU,
1366 * set up to detect a quiescent state, otherwise don't
1367 * go looking for one.
1368 */
1369 rdp->gpnum = rnp->gpnum;
1370 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1371 rdp->passed_quiesce = 0;
1372 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1373 zero_cpu_stall_ticks(rdp);
1374 }
1375}
1376
1377static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1378{
1379 unsigned long flags;
1380 struct rcu_node *rnp;
1381
1382 local_irq_save(flags);
1383 rnp = rdp->mynode;
1384 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1385 rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
1386 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1387 local_irq_restore(flags);
1388 return;
1389 }
1390 smp_mb__after_unlock_lock();
1391 __note_gp_changes(rsp, rnp, rdp);
1392 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1393}
1394
1395/*
1396 * Initialize a new grace period. Return 0 if no grace period required.
1397 */
1398static int rcu_gp_init(struct rcu_state *rsp)
1399{
1400 struct rcu_data *rdp;
1401 struct rcu_node *rnp = rcu_get_root(rsp);
1402
1403 rcu_bind_gp_kthread();
1404 raw_spin_lock_irq(&rnp->lock);
1405 smp_mb__after_unlock_lock();
1406 if (rsp->gp_flags == 0) {
1407 /* Spurious wakeup, tell caller to go back to sleep. */
1408 raw_spin_unlock_irq(&rnp->lock);
1409 return 0;
1410 }
1411 rsp->gp_flags = 0; /* Clear all flags: New grace period. */
1412
1413 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1414 /*
1415 * Grace period already in progress, don't start another.
1416 * Not supposed to be able to happen.
1417 */
1418 raw_spin_unlock_irq(&rnp->lock);
1419 return 0;
1420 }
1421
1422 /* Advance to a new grace period and initialize state. */
1423 record_gp_stall_check_time(rsp);
1424 /* Record GP times before starting GP, hence smp_store_release(). */
1425 smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1426 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1427 raw_spin_unlock_irq(&rnp->lock);
1428
1429 /* Exclude any concurrent CPU-hotplug operations. */
1430 mutex_lock(&rsp->onoff_mutex);
1431 smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
1432
1433 /*
1434 * Set the quiescent-state-needed bits in all the rcu_node
1435 * structures for all currently online CPUs in breadth-first order,
1436 * starting from the root rcu_node structure, relying on the layout
1437 * of the tree within the rsp->node[] array. Note that other CPUs
1438 * will access only the leaves of the hierarchy, thus seeing that no
1439 * grace period is in progress, at least until the corresponding
1440 * leaf node has been initialized. In addition, we have excluded
1441 * CPU-hotplug operations.
1442 *
1443 * The grace period cannot complete until the initialization
1444 * process finishes, because this kthread handles both.
1445 */
1446 rcu_for_each_node_breadth_first(rsp, rnp) {
1447 raw_spin_lock_irq(&rnp->lock);
1448 smp_mb__after_unlock_lock();
1449 rdp = this_cpu_ptr(rsp->rda);
1450 rcu_preempt_check_blocked_tasks(rnp);
1451 rnp->qsmask = rnp->qsmaskinit;
1452 ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
1453 WARN_ON_ONCE(rnp->completed != rsp->completed);
1454 ACCESS_ONCE(rnp->completed) = rsp->completed;
1455 if (rnp == rdp->mynode)
1456 __note_gp_changes(rsp, rnp, rdp);
1457 rcu_preempt_boost_start_gp(rnp);
1458 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1459 rnp->level, rnp->grplo,
1460 rnp->grphi, rnp->qsmask);
1461 raw_spin_unlock_irq(&rnp->lock);
1462#ifdef CONFIG_PROVE_RCU_DELAY
1463 if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
1464 system_state == SYSTEM_RUNNING)
1465 udelay(200);
1466#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
1467 cond_resched();
1468 }
1469
1470 mutex_unlock(&rsp->onoff_mutex);
1471 return 1;
1472}
1473
1474/*
1475 * Do one round of quiescent-state forcing.
1476 */
1477static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1478{
1479 int fqs_state = fqs_state_in;
1480 bool isidle = false;
1481 unsigned long maxj;
1482 struct rcu_node *rnp = rcu_get_root(rsp);
1483
1484 rsp->n_force_qs++;
1485 if (fqs_state == RCU_SAVE_DYNTICK) {
1486 /* Collect dyntick-idle snapshots. */
1487 if (is_sysidle_rcu_state(rsp)) {
1488 isidle = 1;
1489 maxj = jiffies - ULONG_MAX / 4;
1490 }
1491 force_qs_rnp(rsp, dyntick_save_progress_counter,
1492 &isidle, &maxj);
1493 rcu_sysidle_report_gp(rsp, isidle, maxj);
1494 fqs_state = RCU_FORCE_QS;
1495 } else {
1496 /* Handle dyntick-idle and offline CPUs. */
1497 isidle = 0;
1498 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1499 }
1500 /* Clear flag to prevent immediate re-entry. */
1501 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1502 raw_spin_lock_irq(&rnp->lock);
1503 smp_mb__after_unlock_lock();
1504 rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
1505 raw_spin_unlock_irq(&rnp->lock);
1506 }
1507 return fqs_state;
1508}
1509
1510/*
1511 * Clean up after the old grace period.
1512 */
1513static void rcu_gp_cleanup(struct rcu_state *rsp)
1514{
1515 unsigned long gp_duration;
1516 int nocb = 0;
1517 struct rcu_data *rdp;
1518 struct rcu_node *rnp = rcu_get_root(rsp);
1519
1520 raw_spin_lock_irq(&rnp->lock);
1521 smp_mb__after_unlock_lock();
1522 gp_duration = jiffies - rsp->gp_start;
1523 if (gp_duration > rsp->gp_max)
1524 rsp->gp_max = gp_duration;
1525
1526 /*
1527 * We know the grace period is complete, but to everyone else
1528 * it appears to still be ongoing. But it is also the case
1529 * that to everyone else it looks like there is nothing that
1530 * they can do to advance the grace period. It is therefore
1531 * safe for us to drop the lock in order to mark the grace
1532 * period as completed in all of the rcu_node structures.
1533 */
1534 raw_spin_unlock_irq(&rnp->lock);
1535
1536 /*
1537 * Propagate new ->completed value to rcu_node structures so
1538 * that other CPUs don't have to wait until the start of the next
1539 * grace period to process their callbacks. This also avoids
1540 * some nasty RCU grace-period initialization races by forcing
1541 * the end of the current grace period to be completely recorded in
1542 * all of the rcu_node structures before the beginning of the next
1543 * grace period is recorded in any of the rcu_node structures.
1544 */
1545 rcu_for_each_node_breadth_first(rsp, rnp) {
1546 raw_spin_lock_irq(&rnp->lock);
1547 smp_mb__after_unlock_lock();
1548 ACCESS_ONCE(rnp->completed) = rsp->gpnum;
1549 rdp = this_cpu_ptr(rsp->rda);
1550 if (rnp == rdp->mynode)
1551 __note_gp_changes(rsp, rnp, rdp);
1552 /* smp_mb() provided by prior unlock-lock pair. */
1553 nocb += rcu_future_gp_cleanup(rsp, rnp);
1554 raw_spin_unlock_irq(&rnp->lock);
1555 cond_resched();
1556 }
1557 rnp = rcu_get_root(rsp);
1558 raw_spin_lock_irq(&rnp->lock);
1559 smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
1560 rcu_nocb_gp_set(rnp, nocb);
1561
1562 /* Declare grace period done. */
1563 ACCESS_ONCE(rsp->completed) = rsp->gpnum;
1564 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
1565 rsp->fqs_state = RCU_GP_IDLE;
1566 rdp = this_cpu_ptr(rsp->rda);
1567 rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
1568 if (cpu_needs_another_gp(rsp, rdp)) {
1569 rsp->gp_flags = RCU_GP_FLAG_INIT;
1570 trace_rcu_grace_period(rsp->name,
1571 ACCESS_ONCE(rsp->gpnum),
1572 TPS("newreq"));
1573 }
1574 raw_spin_unlock_irq(&rnp->lock);
1575}
1576
1577/*
1578 * Body of kthread that handles grace periods.
1579 */
1580static int __noreturn rcu_gp_kthread(void *arg)
1581{
1582 int fqs_state;
1583 int gf;
1584 unsigned long j;
1585 int ret;
1586 struct rcu_state *rsp = arg;
1587 struct rcu_node *rnp = rcu_get_root(rsp);
1588
1589 for (;;) {
1590
1591 /* Handle grace-period start. */
1592 for (;;) {
1593 trace_rcu_grace_period(rsp->name,
1594 ACCESS_ONCE(rsp->gpnum),
1595 TPS("reqwait"));
1596 wait_event_interruptible(rsp->gp_wq,
1597 ACCESS_ONCE(rsp->gp_flags) &
1598 RCU_GP_FLAG_INIT);
1599 /* Locking provides needed memory barrier. */
1600 if (rcu_gp_init(rsp))
1601 break;
1602 cond_resched();
1603 flush_signals(current);
1604 trace_rcu_grace_period(rsp->name,
1605 ACCESS_ONCE(rsp->gpnum),
1606 TPS("reqwaitsig"));
1607 }
1608
1609 /* Handle quiescent-state forcing. */
1610 fqs_state = RCU_SAVE_DYNTICK;
1611 j = jiffies_till_first_fqs;
1612 if (j > HZ) {
1613 j = HZ;
1614 jiffies_till_first_fqs = HZ;
1615 }
1616 ret = 0;
1617 for (;;) {
1618 if (!ret)
1619 rsp->jiffies_force_qs = jiffies + j;
1620 trace_rcu_grace_period(rsp->name,
1621 ACCESS_ONCE(rsp->gpnum),
1622 TPS("fqswait"));
1623 ret = wait_event_interruptible_timeout(rsp->gp_wq,
1624 ((gf = ACCESS_ONCE(rsp->gp_flags)) &
1625 RCU_GP_FLAG_FQS) ||
1626 (!ACCESS_ONCE(rnp->qsmask) &&
1627 !rcu_preempt_blocked_readers_cgp(rnp)),
1628 j);
1629 /* Locking provides needed memory barriers. */
1630 /* If grace period done, leave loop. */
1631 if (!ACCESS_ONCE(rnp->qsmask) &&
1632 !rcu_preempt_blocked_readers_cgp(rnp))
1633 break;
1634 /* If time for quiescent-state forcing, do it. */
1635 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
1636 (gf & RCU_GP_FLAG_FQS)) {
1637 trace_rcu_grace_period(rsp->name,
1638 ACCESS_ONCE(rsp->gpnum),
1639 TPS("fqsstart"));
1640 fqs_state = rcu_gp_fqs(rsp, fqs_state);
1641 trace_rcu_grace_period(rsp->name,
1642 ACCESS_ONCE(rsp->gpnum),
1643 TPS("fqsend"));
1644 cond_resched();
1645 } else {
1646 /* Deal with stray signal. */
1647 cond_resched();
1648 flush_signals(current);
1649 trace_rcu_grace_period(rsp->name,
1650 ACCESS_ONCE(rsp->gpnum),
1651 TPS("fqswaitsig"));
1652 }
1653 j = jiffies_till_next_fqs;
1654 if (j > HZ) {
1655 j = HZ;
1656 jiffies_till_next_fqs = HZ;
1657 } else if (j < 1) {
1658 j = 1;
1659 jiffies_till_next_fqs = 1;
1660 }
1661 }
1662
1663 /* Handle grace-period end. */
1664 rcu_gp_cleanup(rsp);
1665 }
1666}
1667
1668static void rsp_wakeup(struct irq_work *work)
1669{
1670 struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
1671
1672 /* Wake up rcu_gp_kthread() to start the grace period. */
1673 wake_up(&rsp->gp_wq);
1674}
1675
1676/*
1677 * Start a new RCU grace period if warranted, re-initializing the hierarchy
1678 * in preparation for detecting the next grace period. The caller must hold
1679 * the root node's ->lock and hard irqs must be disabled.
1680 *
1681 * Note that it is legal for a dying CPU (which is marked as offline) to
1682 * invoke this function. This can happen when the dying CPU reports its
1683 * quiescent state.
1684 */
1685static void
1686rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1687 struct rcu_data *rdp)
1688{
1689 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
1690 /*
1691 * Either we have not yet spawned the grace-period
1692 * task, this CPU does not need another grace period,
1693 * or a grace period is already in progress.
1694 * Either way, don't start a new grace period.
1695 */
1696 return;
1697 }
1698 rsp->gp_flags = RCU_GP_FLAG_INIT;
1699 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
1700 TPS("newreq"));
1701
1702 /*
1703 * We can't do wakeups while holding the rnp->lock, as that
1704 * could cause possible deadlocks with the rq->lock. Defer
1705 * the wakeup to interrupt context. And don't bother waking
1706 * up the running kthread.
1707 */
1708 if (current != rsp->gp_kthread)
1709 irq_work_queue(&rsp->wakeup_work);
1710}
1711
1712/*
1713 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
1714 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it
1715 * is invoked indirectly from rcu_advance_cbs(), which would result in
1716 * endless recursion -- or would do so if it wasn't for the self-deadlock
1717 * that is encountered beforehand.
1718 */
1719static void
1720rcu_start_gp(struct rcu_state *rsp)
1721{
1722 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1723 struct rcu_node *rnp = rcu_get_root(rsp);
1724
1725 /*
1726 * If there is no grace period in progress right now, any
1727 * callbacks we have up to this point will be satisfied by the
1728 * next grace period. Also, advancing the callbacks reduces the
1729 * probability of false positives from cpu_needs_another_gp()
1730 * resulting in pointless grace periods. So, advance callbacks
1731 * then start the grace period!
1732 */
1733 rcu_advance_cbs(rsp, rnp, rdp);
1734 rcu_start_gp_advanced(rsp, rnp, rdp);
1735}
1736
1737/*
1738 * Report a full set of quiescent states to the specified rcu_state
1739 * data structure. This involves cleaning up after the prior grace
1740 * period and letting rcu_start_gp() start up the next grace period
1741 * if one is needed. Note that the caller must hold rnp->lock, which
1742 * is released before return.
1743 */
1744static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1745 __releases(rcu_get_root(rsp)->lock)
1746{
1747 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1748 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
1749 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
1750}
1751
1752/*
1753 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1754 * Allows quiescent states for a group of CPUs to be reported at one go
1755 * to the specified rcu_node structure, though all the CPUs in the group
1756 * must be represented by the same rcu_node structure (which need not be
1757 * a leaf rcu_node structure, though it often will be). That structure's
1758 * lock must be held upon entry, and it is released before return.
1759 */
1760static void
1761rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1762 struct rcu_node *rnp, unsigned long flags)
1763 __releases(rnp->lock)
1764{
1765 struct rcu_node *rnp_c;
1766
1767 /* Walk up the rcu_node hierarchy. */
1768 for (;;) {
1769 if (!(rnp->qsmask & mask)) {
1770
1771 /* Our bit has already been cleared, so done. */
1772 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1773 return;
1774 }
1775 rnp->qsmask &= ~mask;
1776 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
1777 mask, rnp->qsmask, rnp->level,
1778 rnp->grplo, rnp->grphi,
1779 !!rnp->gp_tasks);
1780 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1781
1782 /* Other bits still set at this level, so done. */
1783 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1784 return;
1785 }
1786 mask = rnp->grpmask;
1787 if (rnp->parent == NULL) {
1788
1789 /* No more levels. Exit loop holding root lock. */
1790
1791 break;
1792 }
1793 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1794 rnp_c = rnp;
1795 rnp = rnp->parent;
1796 raw_spin_lock_irqsave(&rnp->lock, flags);
1797 smp_mb__after_unlock_lock();
1798 WARN_ON_ONCE(rnp_c->qsmask);
1799 }
1800
1801 /*
1802 * Get here if we are the last CPU to pass through a quiescent
1803 * state for this grace period. Invoke rcu_report_qs_rsp()
1804 * to clean up and start the next grace period if one is needed.
1805 */
1806 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
1807}
1808
1809/*
1810 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1811 * structure. This must be either called from the specified CPU, or
1812 * called when the specified CPU is known to be offline (and when it is
1813 * also known that no other CPU is concurrently trying to help the offline
1814 * CPU). The lastcomp argument is used to make sure we are still in the
1815 * grace period of interest. We don't want to end the current grace period
1816 * based on quiescent states detected in an earlier grace period!
1817 */
1818static void
1819rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
1820{
1821 unsigned long flags;
1822 unsigned long mask;
1823 struct rcu_node *rnp;
1824
1825 rnp = rdp->mynode;
1826 raw_spin_lock_irqsave(&rnp->lock, flags);
1827 smp_mb__after_unlock_lock();
1828 if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
1829 rnp->completed == rnp->gpnum) {
1830
1831 /*
1832 * The grace period in which this quiescent state was
1833 * recorded has ended, so don't report it upwards.
1834 * We will instead need a new quiescent state that lies
1835 * within the current grace period.
1836 */
1837 rdp->passed_quiesce = 0; /* need qs for new gp. */
1838 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1839 return;
1840 }
1841 mask = rdp->grpmask;
1842 if ((rnp->qsmask & mask) == 0) {
1843 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1844 } else {
1845 rdp->qs_pending = 0;
1846
1847 /*
1848 * This GP can't end until cpu checks in, so all of our
1849 * callbacks can be processed during the next GP.
1850 */
1851 rcu_accelerate_cbs(rsp, rnp, rdp);
1852
1853 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
1854 }
1855}
1856
1857/*
1858 * Check to see if there is a new grace period of which this CPU
1859 * is not yet aware, and if so, set up local rcu_data state for it.
1860 * Otherwise, see if this CPU has just passed through its first
1861 * quiescent state for this grace period, and record that fact if so.
1862 */
1863static void
1864rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1865{
1866 /* Check for grace-period ends and beginnings. */
1867 note_gp_changes(rsp, rdp);
1868
1869 /*
1870 * Does this CPU still need to do its part for current grace period?
1871 * If no, return and let the other CPUs do their part as well.
1872 */
1873 if (!rdp->qs_pending)
1874 return;
1875
1876 /*
1877 * Was there a quiescent state since the beginning of the grace
1878 * period? If no, then exit and wait for the next call.
1879 */
1880 if (!rdp->passed_quiesce)
1881 return;
1882
1883 /*
1884 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1885 * judge of that).
1886 */
1887 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
1888}
1889
1890#ifdef CONFIG_HOTPLUG_CPU
1891
1892/*
1893 * Send the specified CPU's RCU callbacks to the orphanage. The
1894 * specified CPU must be offline, and the caller must hold the
1895 * ->orphan_lock.
1896 */
1897static void
1898rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
1899 struct rcu_node *rnp, struct rcu_data *rdp)
1900{
1901 /* No-CBs CPUs do not have orphanable callbacks. */
1902 if (rcu_is_nocb_cpu(rdp->cpu))
1903 return;
1904
1905 /*
1906 * Orphan the callbacks. First adjust the counts. This is safe
1907 * because _rcu_barrier() excludes CPU-hotplug operations, so it
1908 * cannot be running now. Thus no memory barrier is required.
1909 */
1910 if (rdp->nxtlist != NULL) {
1911 rsp->qlen_lazy += rdp->qlen_lazy;
1912 rsp->qlen += rdp->qlen;
1913 rdp->n_cbs_orphaned += rdp->qlen;
1914 rdp->qlen_lazy = 0;
1915 ACCESS_ONCE(rdp->qlen) = 0;
1916 }
1917
1918 /*
1919 * Next, move those callbacks still needing a grace period to
1920 * the orphanage, where some other CPU will pick them up.
1921 * Some of the callbacks might have gone partway through a grace
1922 * period, but that is too bad. They get to start over because we
1923 * cannot assume that grace periods are synchronized across CPUs.
1924 * We don't bother updating the ->nxttail[] array yet, instead
1925 * we just reset the whole thing later on.
1926 */
1927 if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
1928 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
1929 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
1930 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1931 }
1932
1933 /*
1934 * Then move the ready-to-invoke callbacks to the orphanage,
1935 * where some other CPU will pick them up. These will not be
1936 * required to pass though another grace period: They are done.
1937 */
1938 if (rdp->nxtlist != NULL) {
1939 *rsp->orphan_donetail = rdp->nxtlist;
1940 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
1941 }
1942
1943 /* Finally, initialize the rcu_data structure's list to empty. */
1944 init_callback_list(rdp);
1945}
1946
1947/*
1948 * Adopt the RCU callbacks from the specified rcu_state structure's
1949 * orphanage. The caller must hold the ->orphan_lock.
1950 */
1951static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
1952{
1953 int i;
1954 struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
1955
1956 /* No-CBs CPUs are handled specially. */
1957 if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
1958 return;
1959
1960 /* Do the accounting first. */
1961 rdp->qlen_lazy += rsp->qlen_lazy;
1962 rdp->qlen += rsp->qlen;
1963 rdp->n_cbs_adopted += rsp->qlen;
1964 if (rsp->qlen_lazy != rsp->qlen)
1965 rcu_idle_count_callbacks_posted();
1966 rsp->qlen_lazy = 0;
1967 rsp->qlen = 0;
1968
1969 /*
1970 * We do not need a memory barrier here because the only way we
1971 * can get here if there is an rcu_barrier() in flight is if
1972 * we are the task doing the rcu_barrier().
1973 */
1974
1975 /* First adopt the ready-to-invoke callbacks. */
1976 if (rsp->orphan_donelist != NULL) {
1977 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
1978 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
1979 for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
1980 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
1981 rdp->nxttail[i] = rsp->orphan_donetail;
1982 rsp->orphan_donelist = NULL;
1983 rsp->orphan_donetail = &rsp->orphan_donelist;
1984 }
1985
1986 /* And then adopt the callbacks that still need a grace period. */
1987 if (rsp->orphan_nxtlist != NULL) {
1988 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
1989 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
1990 rsp->orphan_nxtlist = NULL;
1991 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
1992 }
1993}
1994
1995/*
1996 * Trace the fact that this CPU is going offline.
1997 */
1998static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1999{
2000 RCU_TRACE(unsigned long mask);
2001 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2002 RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2003
2004 RCU_TRACE(mask = rdp->grpmask);
2005 trace_rcu_grace_period(rsp->name,
2006 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2007 TPS("cpuofl"));
2008}
2009
2010/*
2011 * The CPU has been completely removed, and some other CPU is reporting
2012 * this fact from process context. Do the remainder of the cleanup,
2013 * including orphaning the outgoing CPU's RCU callbacks, and also
2014 * adopting them. There can only be one CPU hotplug operation at a time,
2015 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
2016 */
2017static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2018{
2019 unsigned long flags;
2020 unsigned long mask;
2021 int need_report = 0;
2022 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2023 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2024
2025 /* Adjust any no-longer-needed kthreads. */
2026 rcu_boost_kthread_setaffinity(rnp, -1);
2027
2028 /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
2029
2030 /* Exclude any attempts to start a new grace period. */
2031 mutex_lock(&rsp->onoff_mutex);
2032 raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2033
2034 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2035 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2036 rcu_adopt_orphan_cbs(rsp, flags);
2037
2038 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
2039 mask = rdp->grpmask; /* rnp->grplo is constant. */
2040 do {
2041 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2042 smp_mb__after_unlock_lock();
2043 rnp->qsmaskinit &= ~mask;
2044 if (rnp->qsmaskinit != 0) {
2045 if (rnp != rdp->mynode)
2046 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2047 break;
2048 }
2049 if (rnp == rdp->mynode)
2050 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2051 else
2052 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2053 mask = rnp->grpmask;
2054 rnp = rnp->parent;
2055 } while (rnp != NULL);
2056
2057 /*
2058 * We still hold the leaf rcu_node structure lock here, and
2059 * irqs are still disabled. The reason for this subterfuge is
2060 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2061 * held leads to deadlock.
2062 */
2063 raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2064 rnp = rdp->mynode;
2065 if (need_report & RCU_OFL_TASKS_NORM_GP)
2066 rcu_report_unblock_qs_rnp(rnp, flags);
2067 else
2068 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2069 if (need_report & RCU_OFL_TASKS_EXP_GP)
2070 rcu_report_exp_rnp(rsp, rnp, true);
2071 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2072 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2073 cpu, rdp->qlen, rdp->nxtlist);
2074 init_callback_list(rdp);
2075 /* Disallow further callbacks on this CPU. */
2076 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2077 mutex_unlock(&rsp->onoff_mutex);
2078}
2079
2080#else /* #ifdef CONFIG_HOTPLUG_CPU */
2081
2082static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2083{
2084}
2085
2086static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2087{
2088}
2089
2090#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
2091
2092/*
2093 * Invoke any RCU callbacks that have made it to the end of their grace
2094 * period. Thottle as specified by rdp->blimit.
2095 */
2096static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2097{
2098 unsigned long flags;
2099 struct rcu_head *next, *list, **tail;
2100 long bl, count, count_lazy;
2101 int i;
2102
2103 /* If no callbacks are ready, just return. */
2104 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2105 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2106 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
2107 need_resched(), is_idle_task(current),
2108 rcu_is_callbacks_kthread());
2109 return;
2110 }
2111
2112 /*
2113 * Extract the list of ready callbacks, disabling to prevent
2114 * races with call_rcu() from interrupt handlers.
2115 */
2116 local_irq_save(flags);
2117 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2118 bl = rdp->blimit;
2119 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2120 list = rdp->nxtlist;
2121 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2122 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
2123 tail = rdp->nxttail[RCU_DONE_TAIL];
2124 for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2125 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2126 rdp->nxttail[i] = &rdp->nxtlist;
2127 local_irq_restore(flags);
2128
2129 /* Invoke callbacks. */
2130 count = count_lazy = 0;
2131 while (list) {
2132 next = list->next;
2133 prefetch(next);
2134 debug_rcu_head_unqueue(list);
2135 if (__rcu_reclaim(rsp->name, list))
2136 count_lazy++;
2137 list = next;
2138 /* Stop only if limit reached and CPU has something to do. */
2139 if (++count >= bl &&
2140 (need_resched() ||
2141 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2142 break;
2143 }
2144
2145 local_irq_save(flags);
2146 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2147 is_idle_task(current),
2148 rcu_is_callbacks_kthread());
2149
2150 /* Update count, and requeue any remaining callbacks. */
2151 if (list != NULL) {
2152 *tail = rdp->nxtlist;
2153 rdp->nxtlist = list;
2154 for (i = 0; i < RCU_NEXT_SIZE; i++)
2155 if (&rdp->nxtlist == rdp->nxttail[i])
2156 rdp->nxttail[i] = tail;
2157 else
2158 break;
2159 }
2160 smp_mb(); /* List handling before counting for rcu_barrier(). */
2161 rdp->qlen_lazy -= count_lazy;
2162 ACCESS_ONCE(rdp->qlen) -= count;
2163 rdp->n_cbs_invoked += count;
2164
2165 /* Reinstate batch limit if we have worked down the excess. */
2166 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2167 rdp->blimit = blimit;
2168
2169 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2170 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2171 rdp->qlen_last_fqs_check = 0;
2172 rdp->n_force_qs_snap = rsp->n_force_qs;
2173 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2174 rdp->qlen_last_fqs_check = rdp->qlen;
2175 WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2176
2177 local_irq_restore(flags);
2178
2179 /* Re-invoke RCU core processing if there are callbacks remaining. */
2180 if (cpu_has_callbacks_ready_to_invoke(rdp))
2181 invoke_rcu_core();
2182}
2183
2184/*
2185 * Check to see if this CPU is in a non-context-switch quiescent state
2186 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2187 * Also schedule RCU core processing.
2188 *
2189 * This function must be called from hardirq context. It is normally
2190 * invoked from the scheduling-clock interrupt. If rcu_pending returns
2191 * false, there is no point in invoking rcu_check_callbacks().
2192 */
2193void rcu_check_callbacks(int cpu, int user)
2194{
2195 trace_rcu_utilization(TPS("Start scheduler-tick"));
2196 increment_cpu_stall_ticks();
2197 if (user || rcu_is_cpu_rrupt_from_idle()) {
2198
2199 /*
2200 * Get here if this CPU took its interrupt from user
2201 * mode or from the idle loop, and if this is not a
2202 * nested interrupt. In this case, the CPU is in
2203 * a quiescent state, so note it.
2204 *
2205 * No memory barrier is required here because both
2206 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2207 * variables that other CPUs neither access nor modify,
2208 * at least not while the corresponding CPU is online.
2209 */
2210
2211 rcu_sched_qs(cpu);
2212 rcu_bh_qs(cpu);
2213
2214 } else if (!in_softirq()) {
2215
2216 /*
2217 * Get here if this CPU did not take its interrupt from
2218 * softirq, in other words, if it is not interrupting
2219 * a rcu_bh read-side critical section. This is an _bh
2220 * critical section, so note it.
2221 */
2222
2223 rcu_bh_qs(cpu);
2224 }
2225 rcu_preempt_check_callbacks(cpu);
2226 if (rcu_pending(cpu))
2227 invoke_rcu_core();
2228 trace_rcu_utilization(TPS("End scheduler-tick"));
2229}
2230
2231/*
2232 * Scan the leaf rcu_node structures, processing dyntick state for any that
2233 * have not yet encountered a quiescent state, using the function specified.
2234 * Also initiate boosting for any threads blocked on the root rcu_node.
2235 *
2236 * The caller must have suppressed start of new grace periods.
2237 */
2238static void force_qs_rnp(struct rcu_state *rsp,
2239 int (*f)(struct rcu_data *rsp, bool *isidle,
2240 unsigned long *maxj),
2241 bool *isidle, unsigned long *maxj)
2242{
2243 unsigned long bit;
2244 int cpu;
2245 unsigned long flags;
2246 unsigned long mask;
2247 struct rcu_node *rnp;
2248
2249 rcu_for_each_leaf_node(rsp, rnp) {
2250 cond_resched();
2251 mask = 0;
2252 raw_spin_lock_irqsave(&rnp->lock, flags);
2253 smp_mb__after_unlock_lock();
2254 if (!rcu_gp_in_progress(rsp)) {
2255 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2256 return;
2257 }
2258 if (rnp->qsmask == 0) {
2259 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
2260 continue;
2261 }
2262 cpu = rnp->grplo;
2263 bit = 1;
2264 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2265 if ((rnp->qsmask & bit) != 0) {
2266 if ((rnp->qsmaskinit & bit) != 0)
2267 *isidle = 0;
2268 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2269 mask |= bit;
2270 }
2271 }
2272 if (mask != 0) {
2273
2274 /* rcu_report_qs_rnp() releases rnp->lock. */
2275 rcu_report_qs_rnp(mask, rsp, rnp, flags);
2276 continue;
2277 }
2278 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2279 }
2280 rnp = rcu_get_root(rsp);
2281 if (rnp->qsmask == 0) {
2282 raw_spin_lock_irqsave(&rnp->lock, flags);
2283 smp_mb__after_unlock_lock();
2284 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2285 }
2286}
2287
2288/*
2289 * Force quiescent states on reluctant CPUs, and also detect which
2290 * CPUs are in dyntick-idle mode.
2291 */
2292static void force_quiescent_state(struct rcu_state *rsp)
2293{
2294 unsigned long flags;
2295 bool ret;
2296 struct rcu_node *rnp;
2297 struct rcu_node *rnp_old = NULL;
2298
2299 /* Funnel through hierarchy to reduce memory contention. */
2300 rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
2301 for (; rnp != NULL; rnp = rnp->parent) {
2302 ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2303 !raw_spin_trylock(&rnp->fqslock);
2304 if (rnp_old != NULL)
2305 raw_spin_unlock(&rnp_old->fqslock);
2306 if (ret) {
2307 ACCESS_ONCE(rsp->n_force_qs_lh)++;
2308 return;
2309 }
2310 rnp_old = rnp;
2311 }
2312 /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2313
2314 /* Reached the root of the rcu_node tree, acquire lock. */
2315 raw_spin_lock_irqsave(&rnp_old->lock, flags);
2316 smp_mb__after_unlock_lock();
2317 raw_spin_unlock(&rnp_old->fqslock);
2318 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2319 ACCESS_ONCE(rsp->n_force_qs_lh)++;
2320 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2321 return; /* Someone beat us to it. */
2322 }
2323 rsp->gp_flags |= RCU_GP_FLAG_FQS;
2324 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2325 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
2326}
2327
2328/*
2329 * This does the RCU core processing work for the specified rcu_state
2330 * and rcu_data structures. This may be called only from the CPU to
2331 * whom the rdp belongs.
2332 */
2333static void
2334__rcu_process_callbacks(struct rcu_state *rsp)
2335{
2336 unsigned long flags;
2337 struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2338
2339 WARN_ON_ONCE(rdp->beenonline == 0);
2340
2341 /* Update RCU state based on any recent quiescent states. */
2342 rcu_check_quiescent_state(rsp, rdp);
2343
2344 /* Does this CPU require a not-yet-started grace period? */
2345 local_irq_save(flags);
2346 if (cpu_needs_another_gp(rsp, rdp)) {
2347 raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2348 rcu_start_gp(rsp);
2349 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2350 } else {
2351 local_irq_restore(flags);
2352 }
2353
2354 /* If there are callbacks ready, invoke them. */
2355 if (cpu_has_callbacks_ready_to_invoke(rdp))
2356 invoke_rcu_callbacks(rsp, rdp);
2357
2358 /* Do any needed deferred wakeups of rcuo kthreads. */
2359 do_nocb_deferred_wakeup(rdp);
2360}
2361
2362/*
2363 * Do RCU core processing for the current CPU.
2364 */
2365static void rcu_process_callbacks(struct softirq_action *unused)
2366{
2367 struct rcu_state *rsp;
2368
2369 if (cpu_is_offline(smp_processor_id()))
2370 return;
2371 trace_rcu_utilization(TPS("Start RCU core"));
2372 for_each_rcu_flavor(rsp)
2373 __rcu_process_callbacks(rsp);
2374 trace_rcu_utilization(TPS("End RCU core"));
2375}
2376
2377/*
2378 * Schedule RCU callback invocation. If the specified type of RCU
2379 * does not support RCU priority boosting, just do a direct call,
2380 * otherwise wake up the per-CPU kernel kthread. Note that because we
2381 * are running on the current CPU with interrupts disabled, the
2382 * rcu_cpu_kthread_task cannot disappear out from under us.
2383 */
2384static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2385{
2386 if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2387 return;
2388 if (likely(!rsp->boost)) {
2389 rcu_do_batch(rsp, rdp);
2390 return;
2391 }
2392 invoke_rcu_callbacks_kthread();
2393}
2394
2395static void invoke_rcu_core(void)
2396{
2397 if (cpu_online(smp_processor_id()))
2398 raise_softirq(RCU_SOFTIRQ);
2399}
2400
2401/*
2402 * Handle any core-RCU processing required by a call_rcu() invocation.
2403 */
2404static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2405 struct rcu_head *head, unsigned long flags)
2406{
2407 /*
2408 * If called from an extended quiescent state, invoke the RCU
2409 * core in order to force a re-evaluation of RCU's idleness.
2410 */
2411 if (!rcu_is_watching() && cpu_online(smp_processor_id()))
2412 invoke_rcu_core();
2413
2414 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2415 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2416 return;
2417
2418 /*
2419 * Force the grace period if too many callbacks or too long waiting.
2420 * Enforce hysteresis, and don't invoke force_quiescent_state()
2421 * if some other CPU has recently done so. Also, don't bother
2422 * invoking force_quiescent_state() if the newly enqueued callback
2423 * is the only one waiting for a grace period to complete.
2424 */
2425 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2426
2427 /* Are we ignoring a completed grace period? */
2428 note_gp_changes(rsp, rdp);
2429
2430 /* Start a new grace period if one not already started. */
2431 if (!rcu_gp_in_progress(rsp)) {
2432 struct rcu_node *rnp_root = rcu_get_root(rsp);
2433
2434 raw_spin_lock(&rnp_root->lock);
2435 smp_mb__after_unlock_lock();
2436 rcu_start_gp(rsp);
2437 raw_spin_unlock(&rnp_root->lock);
2438 } else {
2439 /* Give the grace period a kick. */
2440 rdp->blimit = LONG_MAX;
2441 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2442 *rdp->nxttail[RCU_DONE_TAIL] != head)
2443 force_quiescent_state(rsp);
2444 rdp->n_force_qs_snap = rsp->n_force_qs;
2445 rdp->qlen_last_fqs_check = rdp->qlen;
2446 }
2447 }
2448}
2449
2450/*
2451 * RCU callback function to leak a callback.
2452 */
2453static void rcu_leak_callback(struct rcu_head *rhp)
2454{
2455}
2456
2457/*
2458 * Helper function for call_rcu() and friends. The cpu argument will
2459 * normally be -1, indicating "currently running CPU". It may specify
2460 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
2461 * is expected to specify a CPU.
2462 */
2463static void
2464__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2465 struct rcu_state *rsp, int cpu, bool lazy)
2466{
2467 unsigned long flags;
2468 struct rcu_data *rdp;
2469
2470 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
2471 if (debug_rcu_head_queue(head)) {
2472 /* Probable double call_rcu(), so leak the callback. */
2473 ACCESS_ONCE(head->func) = rcu_leak_callback;
2474 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
2475 return;
2476 }
2477 head->func = func;
2478 head->next = NULL;
2479
2480 /*
2481 * Opportunistically note grace-period endings and beginnings.
2482 * Note that we might see a beginning right after we see an
2483 * end, but never vice versa, since this CPU has to pass through
2484 * a quiescent state betweentimes.
2485 */
2486 local_irq_save(flags);
2487 rdp = this_cpu_ptr(rsp->rda);
2488
2489 /* Add the callback to our list. */
2490 if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2491 int offline;
2492
2493 if (cpu != -1)
2494 rdp = per_cpu_ptr(rsp->rda, cpu);
2495 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2496 WARN_ON_ONCE(offline);
2497 /* _call_rcu() is illegal on offline CPU; leak the callback. */
2498 local_irq_restore(flags);
2499 return;
2500 }
2501 ACCESS_ONCE(rdp->qlen)++;
2502 if (lazy)
2503 rdp->qlen_lazy++;
2504 else
2505 rcu_idle_count_callbacks_posted();
2506 smp_mb(); /* Count before adding callback for rcu_barrier(). */
2507 *rdp->nxttail[RCU_NEXT_TAIL] = head;
2508 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2509
2510 if (__is_kfree_rcu_offset((unsigned long)func))
2511 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2512 rdp->qlen_lazy, rdp->qlen);
2513 else
2514 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
2515
2516 /* Go handle any RCU core processing required. */
2517 __call_rcu_core(rsp, rdp, head, flags);
2518 local_irq_restore(flags);
2519}
2520
2521/*
2522 * Queue an RCU-sched callback for invocation after a grace period.
2523 */
2524void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2525{
2526 __call_rcu(head, func, &rcu_sched_state, -1, 0);
2527}
2528EXPORT_SYMBOL_GPL(call_rcu_sched);
2529
2530/*
2531 * Queue an RCU callback for invocation after a quicker grace period.
2532 */
2533void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2534{
2535 __call_rcu(head, func, &rcu_bh_state, -1, 0);
2536}
2537EXPORT_SYMBOL_GPL(call_rcu_bh);
2538
2539/*
2540 * Because a context switch is a grace period for RCU-sched and RCU-bh,
2541 * any blocking grace-period wait automatically implies a grace period
2542 * if there is only one CPU online at any point time during execution
2543 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
2544 * occasionally incorrectly indicate that there are multiple CPUs online
2545 * when there was in fact only one the whole time, as this just adds
2546 * some overhead: RCU still operates correctly.
2547 */
2548static inline int rcu_blocking_is_gp(void)
2549{
2550 int ret;
2551
2552 might_sleep(); /* Check for RCU read-side critical section. */
2553 preempt_disable();
2554 ret = num_online_cpus() <= 1;
2555 preempt_enable();
2556 return ret;
2557}
2558
2559/**
2560 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
2561 *
2562 * Control will return to the caller some time after a full rcu-sched
2563 * grace period has elapsed, in other words after all currently executing
2564 * rcu-sched read-side critical sections have completed. These read-side
2565 * critical sections are delimited by rcu_read_lock_sched() and
2566 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
2567 * local_irq_disable(), and so on may be used in place of
2568 * rcu_read_lock_sched().
2569 *
2570 * This means that all preempt_disable code sequences, including NMI and
2571 * non-threaded hardware-interrupt handlers, in progress on entry will
2572 * have completed before this primitive returns. However, this does not
2573 * guarantee that softirq handlers will have completed, since in some
2574 * kernels, these handlers can run in process context, and can block.
2575 *
2576 * Note that this guarantee implies further memory-ordering guarantees.
2577 * On systems with more than one CPU, when synchronize_sched() returns,
2578 * each CPU is guaranteed to have executed a full memory barrier since the
2579 * end of its last RCU-sched read-side critical section whose beginning
2580 * preceded the call to synchronize_sched(). In addition, each CPU having
2581 * an RCU read-side critical section that extends beyond the return from
2582 * synchronize_sched() is guaranteed to have executed a full memory barrier
2583 * after the beginning of synchronize_sched() and before the beginning of
2584 * that RCU read-side critical section. Note that these guarantees include
2585 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2586 * that are executing in the kernel.
2587 *
2588 * Furthermore, if CPU A invoked synchronize_sched(), which returned
2589 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2590 * to have executed a full memory barrier during the execution of
2591 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
2592 * again only if the system has more than one CPU).
2593 *
2594 * This primitive provides the guarantees made by the (now removed)
2595 * synchronize_kernel() API. In contrast, synchronize_rcu() only
2596 * guarantees that rcu_read_lock() sections will have completed.
2597 * In "classic RCU", these two guarantees happen to be one and
2598 * the same, but can differ in realtime RCU implementations.
2599 */
2600void synchronize_sched(void)
2601{
2602 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2603 !lock_is_held(&rcu_lock_map) &&
2604 !lock_is_held(&rcu_sched_lock_map),
2605 "Illegal synchronize_sched() in RCU-sched read-side critical section");
2606 if (rcu_blocking_is_gp())
2607 return;
2608 if (rcu_expedited)
2609 synchronize_sched_expedited();
2610 else
2611 wait_rcu_gp(call_rcu_sched);
2612}
2613EXPORT_SYMBOL_GPL(synchronize_sched);
2614
2615/**
2616 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
2617 *
2618 * Control will return to the caller some time after a full rcu_bh grace
2619 * period has elapsed, in other words after all currently executing rcu_bh
2620 * read-side critical sections have completed. RCU read-side critical
2621 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
2622 * and may be nested.
2623 *
2624 * See the description of synchronize_sched() for more detailed information
2625 * on memory ordering guarantees.
2626 */
2627void synchronize_rcu_bh(void)
2628{
2629 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2630 !lock_is_held(&rcu_lock_map) &&
2631 !lock_is_held(&rcu_sched_lock_map),
2632 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
2633 if (rcu_blocking_is_gp())
2634 return;
2635 if (rcu_expedited)
2636 synchronize_rcu_bh_expedited();
2637 else
2638 wait_rcu_gp(call_rcu_bh);
2639}
2640EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
2641
2642/**
2643 * get_state_synchronize_rcu - Snapshot current RCU state
2644 *
2645 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2646 * to determine whether or not a full grace period has elapsed in the
2647 * meantime.
2648 */
2649unsigned long get_state_synchronize_rcu(void)
2650{
2651 /*
2652 * Any prior manipulation of RCU-protected data must happen
2653 * before the load from ->gpnum.
2654 */
2655 smp_mb(); /* ^^^ */
2656
2657 /*
2658 * Make sure this load happens before the purportedly
2659 * time-consuming work between get_state_synchronize_rcu()
2660 * and cond_synchronize_rcu().
2661 */
2662 return smp_load_acquire(&rcu_state->gpnum);
2663}
2664EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2665
2666/**
2667 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
2668 *
2669 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
2670 *
2671 * If a full RCU grace period has elapsed since the earlier call to
2672 * get_state_synchronize_rcu(), just return. Otherwise, invoke
2673 * synchronize_rcu() to wait for a full grace period.
2674 *
2675 * Yes, this function does not take counter wrap into account. But
2676 * counter wrap is harmless. If the counter wraps, we have waited for
2677 * more than 2 billion grace periods (and way more on a 64-bit system!),
2678 * so waiting for one additional grace period should be just fine.
2679 */
2680void cond_synchronize_rcu(unsigned long oldstate)
2681{
2682 unsigned long newstate;
2683
2684 /*
2685 * Ensure that this load happens before any RCU-destructive
2686 * actions the caller might carry out after we return.
2687 */
2688 newstate = smp_load_acquire(&rcu_state->completed);
2689 if (ULONG_CMP_GE(oldstate, newstate))
2690 synchronize_rcu();
2691}
2692EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
2693
2694static int synchronize_sched_expedited_cpu_stop(void *data)
2695{
2696 /*
2697 * There must be a full memory barrier on each affected CPU
2698 * between the time that try_stop_cpus() is called and the
2699 * time that it returns.
2700 *
2701 * In the current initial implementation of cpu_stop, the
2702 * above condition is already met when the control reaches
2703 * this point and the following smp_mb() is not strictly
2704 * necessary. Do smp_mb() anyway for documentation and
2705 * robustness against future implementation changes.
2706 */
2707 smp_mb(); /* See above comment block. */
2708 return 0;
2709}
2710
2711/**
2712 * synchronize_sched_expedited - Brute-force RCU-sched grace period
2713 *
2714 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
2715 * approach to force the grace period to end quickly. This consumes
2716 * significant time on all CPUs and is unfriendly to real-time workloads,
2717 * so is thus not recommended for any sort of common-case code. In fact,
2718 * if you are using synchronize_sched_expedited() in a loop, please
2719 * restructure your code to batch your updates, and then use a single
2720 * synchronize_sched() instead.
2721 *
2722 * Note that it is illegal to call this function while holding any lock
2723 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
2724 * to call this function from a CPU-hotplug notifier. Failing to observe
2725 * these restriction will result in deadlock.
2726 *
2727 * This implementation can be thought of as an application of ticket
2728 * locking to RCU, with sync_sched_expedited_started and
2729 * sync_sched_expedited_done taking on the roles of the halves
2730 * of the ticket-lock word. Each task atomically increments
2731 * sync_sched_expedited_started upon entry, snapshotting the old value,
2732 * then attempts to stop all the CPUs. If this succeeds, then each
2733 * CPU will have executed a context switch, resulting in an RCU-sched
2734 * grace period. We are then done, so we use atomic_cmpxchg() to
2735 * update sync_sched_expedited_done to match our snapshot -- but
2736 * only if someone else has not already advanced past our snapshot.
2737 *
2738 * On the other hand, if try_stop_cpus() fails, we check the value
2739 * of sync_sched_expedited_done. If it has advanced past our
2740 * initial snapshot, then someone else must have forced a grace period
2741 * some time after we took our snapshot. In this case, our work is
2742 * done for us, and we can simply return. Otherwise, we try again,
2743 * but keep our initial snapshot for purposes of checking for someone
2744 * doing our work for us.
2745 *
2746 * If we fail too many times in a row, we fall back to synchronize_sched().
2747 */
2748void synchronize_sched_expedited(void)
2749{
2750 long firstsnap, s, snap;
2751 int trycount = 0;
2752 struct rcu_state *rsp = &rcu_sched_state;
2753
2754 /*
2755 * If we are in danger of counter wrap, just do synchronize_sched().
2756 * By allowing sync_sched_expedited_started to advance no more than
2757 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
2758 * that more than 3.5 billion CPUs would be required to force a
2759 * counter wrap on a 32-bit system. Quite a few more CPUs would of
2760 * course be required on a 64-bit system.
2761 */
2762 if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
2763 (ulong)atomic_long_read(&rsp->expedited_done) +
2764 ULONG_MAX / 8)) {
2765 synchronize_sched();
2766 atomic_long_inc(&rsp->expedited_wrap);
2767 return;
2768 }
2769
2770 /*
2771 * Take a ticket. Note that atomic_inc_return() implies a
2772 * full memory barrier.
2773 */
2774 snap = atomic_long_inc_return(&rsp->expedited_start);
2775 firstsnap = snap;
2776 get_online_cpus();
2777 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2778
2779 /*
2780 * Each pass through the following loop attempts to force a
2781 * context switch on each CPU.
2782 */
2783 while (try_stop_cpus(cpu_online_mask,
2784 synchronize_sched_expedited_cpu_stop,
2785 NULL) == -EAGAIN) {
2786 put_online_cpus();
2787 atomic_long_inc(&rsp->expedited_tryfail);
2788
2789 /* Check to see if someone else did our work for us. */
2790 s = atomic_long_read(&rsp->expedited_done);
2791 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2792 /* ensure test happens before caller kfree */
2793 smp_mb__before_atomic_inc(); /* ^^^ */
2794 atomic_long_inc(&rsp->expedited_workdone1);
2795 return;
2796 }
2797
2798 /* No joy, try again later. Or just synchronize_sched(). */
2799 if (trycount++ < 10) {
2800 udelay(trycount * num_online_cpus());
2801 } else {
2802 wait_rcu_gp(call_rcu_sched);
2803 atomic_long_inc(&rsp->expedited_normal);
2804 return;
2805 }
2806
2807 /* Recheck to see if someone else did our work for us. */
2808 s = atomic_long_read(&rsp->expedited_done);
2809 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2810 /* ensure test happens before caller kfree */
2811 smp_mb__before_atomic_inc(); /* ^^^ */
2812 atomic_long_inc(&rsp->expedited_workdone2);
2813 return;
2814 }
2815
2816 /*
2817 * Refetching sync_sched_expedited_started allows later
2818 * callers to piggyback on our grace period. We retry
2819 * after they started, so our grace period works for them,
2820 * and they started after our first try, so their grace
2821 * period works for us.
2822 */
2823 get_online_cpus();
2824 snap = atomic_long_read(&rsp->expedited_start);
2825 smp_mb(); /* ensure read is before try_stop_cpus(). */
2826 }
2827 atomic_long_inc(&rsp->expedited_stoppedcpus);
2828
2829 /*
2830 * Everyone up to our most recent fetch is covered by our grace
2831 * period. Update the counter, but only if our work is still
2832 * relevant -- which it won't be if someone who started later
2833 * than we did already did their update.
2834 */
2835 do {
2836 atomic_long_inc(&rsp->expedited_done_tries);
2837 s = atomic_long_read(&rsp->expedited_done);
2838 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
2839 /* ensure test happens before caller kfree */
2840 smp_mb__before_atomic_inc(); /* ^^^ */
2841 atomic_long_inc(&rsp->expedited_done_lost);
2842 break;
2843 }
2844 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
2845 atomic_long_inc(&rsp->expedited_done_exit);
2846
2847 put_online_cpus();
2848}
2849EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
2850
2851/*
2852 * Check to see if there is any immediate RCU-related work to be done
2853 * by the current CPU, for the specified type of RCU, returning 1 if so.
2854 * The checks are in order of increasing expense: checks that can be
2855 * carried out against CPU-local state are performed first. However,
2856 * we must check for CPU stalls first, else we might not get a chance.
2857 */
2858static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
2859{
2860 struct rcu_node *rnp = rdp->mynode;
2861
2862 rdp->n_rcu_pending++;
2863
2864 /* Check for CPU stalls, if enabled. */
2865 check_cpu_stall(rsp, rdp);
2866
2867 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
2868 if (rcu_nohz_full_cpu(rsp))
2869 return 0;
2870
2871 /* Is the RCU core waiting for a quiescent state from this CPU? */
2872 if (rcu_scheduler_fully_active &&
2873 rdp->qs_pending && !rdp->passed_quiesce) {
2874 rdp->n_rp_qs_pending++;
2875 } else if (rdp->qs_pending && rdp->passed_quiesce) {
2876 rdp->n_rp_report_qs++;
2877 return 1;
2878 }
2879
2880 /* Does this CPU have callbacks ready to invoke? */
2881 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
2882 rdp->n_rp_cb_ready++;
2883 return 1;
2884 }
2885
2886 /* Has RCU gone idle with this CPU needing another grace period? */
2887 if (cpu_needs_another_gp(rsp, rdp)) {
2888 rdp->n_rp_cpu_needs_gp++;
2889 return 1;
2890 }
2891
2892 /* Has another RCU grace period completed? */
2893 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
2894 rdp->n_rp_gp_completed++;
2895 return 1;
2896 }
2897
2898 /* Has a new RCU grace period started? */
2899 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
2900 rdp->n_rp_gp_started++;
2901 return 1;
2902 }
2903
2904 /* Does this CPU need a deferred NOCB wakeup? */
2905 if (rcu_nocb_need_deferred_wakeup(rdp)) {
2906 rdp->n_rp_nocb_defer_wakeup++;
2907 return 1;
2908 }
2909
2910 /* nothing to do */
2911 rdp->n_rp_need_nothing++;
2912 return 0;
2913}
2914
2915/*
2916 * Check to see if there is any immediate RCU-related work to be done
2917 * by the current CPU, returning 1 if so. This function is part of the
2918 * RCU implementation; it is -not- an exported member of the RCU API.
2919 */
2920static int rcu_pending(int cpu)
2921{
2922 struct rcu_state *rsp;
2923
2924 for_each_rcu_flavor(rsp)
2925 if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
2926 return 1;
2927 return 0;
2928}
2929
2930/*
2931 * Return true if the specified CPU has any callback. If all_lazy is
2932 * non-NULL, store an indication of whether all callbacks are lazy.
2933 * (If there are no callbacks, all of them are deemed to be lazy.)
2934 */
2935static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
2936{
2937 bool al = true;
2938 bool hc = false;
2939 struct rcu_data *rdp;
2940 struct rcu_state *rsp;
2941
2942 for_each_rcu_flavor(rsp) {
2943 rdp = per_cpu_ptr(rsp->rda, cpu);
2944 if (!rdp->nxtlist)
2945 continue;
2946 hc = true;
2947 if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
2948 al = false;
2949 break;
2950 }
2951 }
2952 if (all_lazy)
2953 *all_lazy = al;
2954 return hc;
2955}
2956
2957/*
2958 * Helper function for _rcu_barrier() tracing. If tracing is disabled,
2959 * the compiler is expected to optimize this away.
2960 */
2961static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
2962 int cpu, unsigned long done)
2963{
2964 trace_rcu_barrier(rsp->name, s, cpu,
2965 atomic_read(&rsp->barrier_cpu_count), done);
2966}
2967
2968/*
2969 * RCU callback function for _rcu_barrier(). If we are last, wake
2970 * up the task executing _rcu_barrier().
2971 */
2972static void rcu_barrier_callback(struct rcu_head *rhp)
2973{
2974 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
2975 struct rcu_state *rsp = rdp->rsp;
2976
2977 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
2978 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
2979 complete(&rsp->barrier_completion);
2980 } else {
2981 _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
2982 }
2983}
2984
2985/*
2986 * Called with preemption disabled, and from cross-cpu IRQ context.
2987 */
2988static void rcu_barrier_func(void *type)
2989{
2990 struct rcu_state *rsp = type;
2991 struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2992
2993 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
2994 atomic_inc(&rsp->barrier_cpu_count);
2995 rsp->call(&rdp->barrier_head, rcu_barrier_callback);
2996}
2997
2998/*
2999 * Orchestrate the specified type of RCU barrier, waiting for all
3000 * RCU callbacks of the specified type to complete.
3001 */
3002static void _rcu_barrier(struct rcu_state *rsp)
3003{
3004 int cpu;
3005 struct rcu_data *rdp;
3006 unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
3007 unsigned long snap_done;
3008
3009 _rcu_barrier_trace(rsp, "Begin", -1, snap);
3010
3011 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3012 mutex_lock(&rsp->barrier_mutex);
3013
3014 /*
3015 * Ensure that all prior references, including to ->n_barrier_done,
3016 * are ordered before the _rcu_barrier() machinery.
3017 */
3018 smp_mb(); /* See above block comment. */
3019
3020 /*
3021 * Recheck ->n_barrier_done to see if others did our work for us.
3022 * This means checking ->n_barrier_done for an even-to-odd-to-even
3023 * transition. The "if" expression below therefore rounds the old
3024 * value up to the next even number and adds two before comparing.
3025 */
3026 snap_done = rsp->n_barrier_done;
3027 _rcu_barrier_trace(rsp, "Check", -1, snap_done);
3028
3029 /*
3030 * If the value in snap is odd, we needed to wait for the current
3031 * rcu_barrier() to complete, then wait for the next one, in other
3032 * words, we need the value of snap_done to be three larger than
3033 * the value of snap. On the other hand, if the value in snap is
3034 * even, we only had to wait for the next rcu_barrier() to complete,
3035 * in other words, we need the value of snap_done to be only two
3036 * greater than the value of snap. The "(snap + 3) & ~0x1" computes
3037 * this for us (thank you, Linus!).
3038 */
3039 if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3040 _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3041 smp_mb(); /* caller's subsequent code after above check. */
3042 mutex_unlock(&rsp->barrier_mutex);
3043 return;
3044 }
3045
3046 /*
3047 * Increment ->n_barrier_done to avoid duplicate work. Use
3048 * ACCESS_ONCE() to prevent the compiler from speculating
3049 * the increment to precede the early-exit check.
3050 */
3051 ACCESS_ONCE(rsp->n_barrier_done)++;
3052 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3053 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3054 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
3055
3056 /*
3057 * Initialize the count to one rather than to zero in order to
3058 * avoid a too-soon return to zero in case of a short grace period
3059 * (or preemption of this task). Exclude CPU-hotplug operations
3060 * to ensure that no offline CPU has callbacks queued.
3061 */
3062 init_completion(&rsp->barrier_completion);
3063 atomic_set(&rsp->barrier_cpu_count, 1);
3064 get_online_cpus();
3065
3066 /*
3067 * Force each CPU with callbacks to register a new callback.
3068 * When that callback is invoked, we will know that all of the
3069 * corresponding CPU's preceding callbacks have been invoked.
3070 */
3071 for_each_possible_cpu(cpu) {
3072 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3073 continue;
3074 rdp = per_cpu_ptr(rsp->rda, cpu);
3075 if (rcu_is_nocb_cpu(cpu)) {
3076 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3077 rsp->n_barrier_done);
3078 atomic_inc(&rsp->barrier_cpu_count);
3079 __call_rcu(&rdp->barrier_head, rcu_barrier_callback,
3080 rsp, cpu, 0);
3081 } else if (ACCESS_ONCE(rdp->qlen)) {
3082 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
3083 rsp->n_barrier_done);
3084 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3085 } else {
3086 _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3087 rsp->n_barrier_done);
3088 }
3089 }
3090 put_online_cpus();
3091
3092 /*
3093 * Now that we have an rcu_barrier_callback() callback on each
3094 * CPU, and thus each counted, remove the initial count.
3095 */
3096 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3097 complete(&rsp->barrier_completion);
3098
3099 /* Increment ->n_barrier_done to prevent duplicate work. */
3100 smp_mb(); /* Keep increment after above mechanism. */
3101 ACCESS_ONCE(rsp->n_barrier_done)++;
3102 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3103 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3104 smp_mb(); /* Keep increment before caller's subsequent code. */
3105
3106 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3107 wait_for_completion(&rsp->barrier_completion);
3108
3109 /* Other rcu_barrier() invocations can now safely proceed. */
3110 mutex_unlock(&rsp->barrier_mutex);
3111}
3112
3113/**
3114 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3115 */
3116void rcu_barrier_bh(void)
3117{
3118 _rcu_barrier(&rcu_bh_state);
3119}
3120EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3121
3122/**
3123 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
3124 */
3125void rcu_barrier_sched(void)
3126{
3127 _rcu_barrier(&rcu_sched_state);
3128}
3129EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3130
3131/*
3132 * Do boot-time initialization of a CPU's per-CPU RCU data.
3133 */
3134static void __init
3135rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3136{
3137 unsigned long flags;
3138 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3139 struct rcu_node *rnp = rcu_get_root(rsp);
3140
3141 /* Set up local state, ensuring consistent view of global state. */
3142 raw_spin_lock_irqsave(&rnp->lock, flags);
3143 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3144 init_callback_list(rdp);
3145 rdp->qlen_lazy = 0;
3146 ACCESS_ONCE(rdp->qlen) = 0;
3147 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3148 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3149 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
3150 rdp->cpu = cpu;
3151 rdp->rsp = rsp;
3152 rcu_boot_init_nocb_percpu_data(rdp);
3153 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3154}
3155
3156/*
3157 * Initialize a CPU's per-CPU RCU data. Note that only one online or
3158 * offline event can be happening at a given time. Note also that we
3159 * can accept some slop in the rsp->completed access due to the fact
3160 * that this CPU cannot possibly have any RCU callbacks in flight yet.
3161 */
3162static void
3163rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
3164{
3165 unsigned long flags;
3166 unsigned long mask;
3167 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3168 struct rcu_node *rnp = rcu_get_root(rsp);
3169
3170 /* Exclude new grace periods. */
3171 mutex_lock(&rsp->onoff_mutex);
3172
3173 /* Set up local state, ensuring consistent view of global state. */
3174 raw_spin_lock_irqsave(&rnp->lock, flags);
3175 rdp->beenonline = 1; /* We have now been online. */
3176 rdp->preemptible = preemptible;
3177 rdp->qlen_last_fqs_check = 0;
3178 rdp->n_force_qs_snap = rsp->n_force_qs;
3179 rdp->blimit = blimit;
3180 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
3181 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3182 rcu_sysidle_init_percpu_data(rdp->dynticks);
3183 atomic_set(&rdp->dynticks->dynticks,
3184 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
3185 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
3186
3187 /* Add CPU to rcu_node bitmasks. */
3188 rnp = rdp->mynode;
3189 mask = rdp->grpmask;
3190 do {
3191 /* Exclude any attempts to start a new GP on small systems. */
3192 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
3193 rnp->qsmaskinit |= mask;
3194 mask = rnp->grpmask;
3195 if (rnp == rdp->mynode) {
3196 /*
3197 * If there is a grace period in progress, we will
3198 * set up to wait for it next time we run the
3199 * RCU core code.
3200 */
3201 rdp->gpnum = rnp->completed;
3202 rdp->completed = rnp->completed;
3203 rdp->passed_quiesce = 0;
3204 rdp->qs_pending = 0;
3205 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3206 }
3207 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
3208 rnp = rnp->parent;
3209 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
3210 local_irq_restore(flags);
3211
3212 mutex_unlock(&rsp->onoff_mutex);
3213}
3214
3215static void rcu_prepare_cpu(int cpu)
3216{
3217 struct rcu_state *rsp;
3218
3219 for_each_rcu_flavor(rsp)
3220 rcu_init_percpu_data(cpu, rsp,
3221 strcmp(rsp->name, "rcu_preempt") == 0);
3222}
3223
3224/*
3225 * Handle CPU online/offline notification events.
3226 */
3227static int rcu_cpu_notify(struct notifier_block *self,
3228 unsigned long action, void *hcpu)
3229{
3230 long cpu = (long)hcpu;
3231 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
3232 struct rcu_node *rnp = rdp->mynode;
3233 struct rcu_state *rsp;
3234
3235 trace_rcu_utilization(TPS("Start CPU hotplug"));
3236 switch (action) {
3237 case CPU_UP_PREPARE:
3238 case CPU_UP_PREPARE_FROZEN:
3239 rcu_prepare_cpu(cpu);
3240 rcu_prepare_kthreads(cpu);
3241 break;
3242 case CPU_ONLINE:
3243 case CPU_DOWN_FAILED:
3244 rcu_boost_kthread_setaffinity(rnp, -1);
3245 break;
3246 case CPU_DOWN_PREPARE:
3247 rcu_boost_kthread_setaffinity(rnp, cpu);
3248 break;
3249 case CPU_DYING:
3250 case CPU_DYING_FROZEN:
3251 for_each_rcu_flavor(rsp)
3252 rcu_cleanup_dying_cpu(rsp);
3253 break;
3254 case CPU_DEAD:
3255 case CPU_DEAD_FROZEN:
3256 case CPU_UP_CANCELED:
3257 case CPU_UP_CANCELED_FROZEN:
3258 for_each_rcu_flavor(rsp)
3259 rcu_cleanup_dead_cpu(cpu, rsp);
3260 break;
3261 default:
3262 break;
3263 }
3264 trace_rcu_utilization(TPS("End CPU hotplug"));
3265 return NOTIFY_OK;
3266}
3267
3268static int rcu_pm_notify(struct notifier_block *self,
3269 unsigned long action, void *hcpu)
3270{
3271 switch (action) {
3272 case PM_HIBERNATION_PREPARE:
3273 case PM_SUSPEND_PREPARE:
3274 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3275 rcu_expedited = 1;
3276 break;
3277 case PM_POST_HIBERNATION:
3278 case PM_POST_SUSPEND:
3279 rcu_expedited = 0;
3280 break;
3281 default:
3282 break;
3283 }
3284 return NOTIFY_OK;
3285}
3286
3287/*
3288 * Spawn the kthread that handles this RCU flavor's grace periods.
3289 */
3290static int __init rcu_spawn_gp_kthread(void)
3291{
3292 unsigned long flags;
3293 struct rcu_node *rnp;
3294 struct rcu_state *rsp;
3295 struct task_struct *t;
3296
3297 for_each_rcu_flavor(rsp) {
3298 t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
3299 BUG_ON(IS_ERR(t));
3300 rnp = rcu_get_root(rsp);
3301 raw_spin_lock_irqsave(&rnp->lock, flags);
3302 rsp->gp_kthread = t;
3303 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3304 rcu_spawn_nocb_kthreads(rsp);
3305 }
3306 return 0;
3307}
3308early_initcall(rcu_spawn_gp_kthread);
3309
3310/*
3311 * This function is invoked towards the end of the scheduler's initialization
3312 * process. Before this is called, the idle task might contain
3313 * RCU read-side critical sections (during which time, this idle
3314 * task is booting the system). After this function is called, the
3315 * idle tasks are prohibited from containing RCU read-side critical
3316 * sections. This function also enables RCU lockdep checking.
3317 */
3318void rcu_scheduler_starting(void)
3319{
3320 WARN_ON(num_online_cpus() != 1);
3321 WARN_ON(nr_context_switches() > 0);
3322 rcu_scheduler_active = 1;
3323}
3324
3325/*
3326 * Compute the per-level fanout, either using the exact fanout specified
3327 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3328 */
3329#ifdef CONFIG_RCU_FANOUT_EXACT
3330static void __init rcu_init_levelspread(struct rcu_state *rsp)
3331{
3332 int i;
3333
3334 rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3335 for (i = rcu_num_lvls - 2; i >= 0; i--)
3336 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3337}
3338#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
3339static void __init rcu_init_levelspread(struct rcu_state *rsp)
3340{
3341 int ccur;
3342 int cprv;
3343 int i;
3344
3345 cprv = nr_cpu_ids;
3346 for (i = rcu_num_lvls - 1; i >= 0; i--) {
3347 ccur = rsp->levelcnt[i];
3348 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3349 cprv = ccur;
3350 }
3351}
3352#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
3353
3354/*
3355 * Helper function for rcu_init() that initializes one rcu_state structure.
3356 */
3357static void __init rcu_init_one(struct rcu_state *rsp,
3358 struct rcu_data __percpu *rda)
3359{
3360 static char *buf[] = { "rcu_node_0",
3361 "rcu_node_1",
3362 "rcu_node_2",
3363 "rcu_node_3" }; /* Match MAX_RCU_LVLS */
3364 static char *fqs[] = { "rcu_node_fqs_0",
3365 "rcu_node_fqs_1",
3366 "rcu_node_fqs_2",
3367 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
3368 int cpustride = 1;
3369 int i;
3370 int j;
3371 struct rcu_node *rnp;
3372
3373 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
3374
3375 /* Silence gcc 4.8 warning about array index out of range. */
3376 if (rcu_num_lvls > RCU_NUM_LVLS)
3377 panic("rcu_init_one: rcu_num_lvls overflow");
3378
3379 /* Initialize the level-tracking arrays. */
3380
3381 for (i = 0; i < rcu_num_lvls; i++)
3382 rsp->levelcnt[i] = num_rcu_lvl[i];
3383 for (i = 1; i < rcu_num_lvls; i++)
3384 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3385 rcu_init_levelspread(rsp);
3386
3387 /* Initialize the elements themselves, starting from the leaves. */
3388
3389 for (i = rcu_num_lvls - 1; i >= 0; i--) {
3390 cpustride *= rsp->levelspread[i];
3391 rnp = rsp->level[i];
3392 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3393 raw_spin_lock_init(&rnp->lock);
3394 lockdep_set_class_and_name(&rnp->lock,
3395 &rcu_node_class[i], buf[i]);
3396 raw_spin_lock_init(&rnp->fqslock);
3397 lockdep_set_class_and_name(&rnp->fqslock,
3398 &rcu_fqs_class[i], fqs[i]);
3399 rnp->gpnum = rsp->gpnum;
3400 rnp->completed = rsp->completed;
3401 rnp->qsmask = 0;
3402 rnp->qsmaskinit = 0;
3403 rnp->grplo = j * cpustride;
3404 rnp->grphi = (j + 1) * cpustride - 1;
3405 if (rnp->grphi >= NR_CPUS)
3406 rnp->grphi = NR_CPUS - 1;
3407 if (i == 0) {
3408 rnp->grpnum = 0;
3409 rnp->grpmask = 0;
3410 rnp->parent = NULL;
3411 } else {
3412 rnp->grpnum = j % rsp->levelspread[i - 1];
3413 rnp->grpmask = 1UL << rnp->grpnum;
3414 rnp->parent = rsp->level[i - 1] +
3415 j / rsp->levelspread[i - 1];
3416 }
3417 rnp->level = i;
3418 INIT_LIST_HEAD(&rnp->blkd_tasks);
3419 rcu_init_one_nocb(rnp);
3420 }
3421 }
3422
3423 rsp->rda = rda;
3424 init_waitqueue_head(&rsp->gp_wq);
3425 init_irq_work(&rsp->wakeup_work, rsp_wakeup);
3426 rnp = rsp->level[rcu_num_lvls - 1];
3427 for_each_possible_cpu(i) {
3428 while (i > rnp->grphi)
3429 rnp++;
3430 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3431 rcu_boot_init_percpu_data(i, rsp);
3432 }
3433 list_add(&rsp->flavors, &rcu_struct_flavors);
3434}
3435
3436/*
3437 * Compute the rcu_node tree geometry from kernel parameters. This cannot
3438 * replace the definitions in tree.h because those are needed to size
3439 * the ->node array in the rcu_state structure.
3440 */
3441static void __init rcu_init_geometry(void)
3442{
3443 ulong d;
3444 int i;
3445 int j;
3446 int n = nr_cpu_ids;
3447 int rcu_capacity[MAX_RCU_LVLS + 1];
3448
3449 /*
3450 * Initialize any unspecified boot parameters.
3451 * The default values of jiffies_till_first_fqs and
3452 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3453 * value, which is a function of HZ, then adding one for each
3454 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3455 */
3456 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3457 if (jiffies_till_first_fqs == ULONG_MAX)
3458 jiffies_till_first_fqs = d;
3459 if (jiffies_till_next_fqs == ULONG_MAX)
3460 jiffies_till_next_fqs = d;
3461
3462 /* If the compile-time values are accurate, just leave. */
3463 if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
3464 nr_cpu_ids == NR_CPUS)
3465 return;
3466 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
3467 rcu_fanout_leaf, nr_cpu_ids);
3468
3469 /*
3470 * Compute number of nodes that can be handled an rcu_node tree
3471 * with the given number of levels. Setting rcu_capacity[0] makes
3472 * some of the arithmetic easier.
3473 */
3474 rcu_capacity[0] = 1;
3475 rcu_capacity[1] = rcu_fanout_leaf;
3476 for (i = 2; i <= MAX_RCU_LVLS; i++)
3477 rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
3478
3479 /*
3480 * The boot-time rcu_fanout_leaf parameter is only permitted
3481 * to increase the leaf-level fanout, not decrease it. Of course,
3482 * the leaf-level fanout cannot exceed the number of bits in
3483 * the rcu_node masks. Finally, the tree must be able to accommodate
3484 * the configured number of CPUs. Complain and fall back to the
3485 * compile-time values if these limits are exceeded.
3486 */
3487 if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
3488 rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
3489 n > rcu_capacity[MAX_RCU_LVLS]) {
3490 WARN_ON(1);
3491 return;
3492 }
3493
3494 /* Calculate the number of rcu_nodes at each level of the tree. */
3495 for (i = 1; i <= MAX_RCU_LVLS; i++)
3496 if (n <= rcu_capacity[i]) {
3497 for (j = 0; j <= i; j++)
3498 num_rcu_lvl[j] =
3499 DIV_ROUND_UP(n, rcu_capacity[i - j]);
3500 rcu_num_lvls = i;
3501 for (j = i + 1; j <= MAX_RCU_LVLS; j++)
3502 num_rcu_lvl[j] = 0;
3503 break;
3504 }
3505
3506 /* Calculate the total number of rcu_node structures. */
3507 rcu_num_nodes = 0;
3508 for (i = 0; i <= MAX_RCU_LVLS; i++)
3509 rcu_num_nodes += num_rcu_lvl[i];
3510 rcu_num_nodes -= n;
3511}
3512
3513void __init rcu_init(void)
3514{
3515 int cpu;
3516
3517 rcu_bootup_announce();
3518 rcu_init_geometry();
3519 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
3520 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
3521 __rcu_init_preempt();
3522 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
3523
3524 /*
3525 * We don't need protection against CPU-hotplug here because
3526 * this is called early in boot, before either interrupts
3527 * or the scheduler are operational.
3528 */
3529 cpu_notifier(rcu_cpu_notify, 0);
3530 pm_notifier(rcu_pm_notify, 0);
3531 for_each_online_cpu(cpu)
3532 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
3533}
3534
3535#include "tree_plugin.h"