/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a kthread worker handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct kthread_work rew_work;
};

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/* some rcu_state fields as well as */
					/* following. */
	unsigned long gp_seq;		/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed;	/* Track furthest future GP request. */
	unsigned long completedqs;	/* All QSes done for this node. */
	unsigned long qsmask;		/* CPUs or groups that need to switch in */
					/* order for current grace period to proceed. */
					/* In leaf rcu_node, each bit corresponds to */
					/* an rcu_data structure, otherwise, each */
					/* bit corresponds to a child rcu_node */
					/* structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
					/* Per-GP initial value for qsmask. */
					/* Initialized from ->qsmaskinitnext at the */
					/* beginning of each grace period. */
	unsigned long qsmaskinitnext;
	unsigned long expmask;		/* CPUs or groups that need to check in */
					/* to allow the current expedited GP */
					/* to complete. */
	unsigned long expmaskinit;
					/* Per-GP initial values for expmask. */
					/* Initialized from ->expmaskinitnext at the */
					/* beginning of each expedited GP. */
	unsigned long expmaskinitnext;
					/* Online CPUs for next expedited GP. */
					/* Any CPU that has ever been online will */
					/* have its bit set. */
	struct kthread_worker *exp_kworker;
					/* Workers performing per node expedited GP */
					/* initialization. */
	unsigned long cbovldmask;
					/* CPUs experiencing callback overload. */
	unsigned long ffmask;		/* Fully functional CPUs. */
	unsigned long grpmask;		/* Mask to apply to parent qsmask. */
					/* Only one bit will be set in this mask. */
	int grplo;			/* lowest-numbered CPU here. */
	int grphi;			/* highest-numbered CPU here. */
	u8 grpnum;			/* group number for next level up. */
	u8 level;			/* root is at level 0. */
	bool wait_blkd_tasks;		/* Necessary to wait for blocked tasks to */
					/* exit RCU read-side critical sections */
					/* before propagating offline up the */
					/* rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
					/* Tasks blocked in RCU read-side critical */
					/* section.  Tasks are placed at the head */
					/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
					/* Pointer to the first task blocking the */
					/* current grace period, or NULL if there */
					/* is no such task. */
	struct list_head *exp_tasks;
					/* Pointer to the first task blocking the */
					/* current expedited grace period, or NULL */
					/* if there is no such task.  If there */
					/* is no current expedited grace period, */
					/* then there cannot be any such task. */
	struct list_head *boost_tasks;
					/* Pointer to first task that needs to be */
					/* priority boosted, or NULL if no priority */
					/* boosting is needed for this rcu_node */
					/* structure.  If there are no tasks */
					/* queued on this rcu_node structure that */
					/* are blocking the current grace period, */
					/* there can be no such task. */
	struct rt_mutex boost_mtx;
					/* Used only for the priority-boosting */
					/* side effect, not as a lock. */
	unsigned long boost_time;
					/* When to start boosting (jiffies). */
	struct mutex kthread_mutex;
					/* Exclusion for thread spawning and affinity */
					/* manipulation. */
	struct task_struct *boost_kthread_task;
					/* kthread that takes care of priority */
					/* boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
					/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;		/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
					/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;		/* Need to flush workitem? */
	raw_spinlock_t exp_poll_lock;
					/* Lock and data for polled expedited grace periods. */
	unsigned long exp_seq_poll_rq;
	struct work_struct exp_poll_wq;
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
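
/*
 * Worked example (illustrative, with hypothetical values): if a leaf
 * rcu_node covers CPUs 16-31 (->grplo == 16, ->grphi == 31), then
 * leaf_node_cpu_bit(rnp, 19) evaluates to BIT(19 - 16) == BIT(3) == 0x8,
 * the bit representing CPU 19 in that node's ->qsmask and related masks.
 */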

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
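
/*
 * Illustrative sketch (hypothetical snippet, not a quote of the core
 * code): the ->s overlay lets a single load test both needs at once,
 * while the individual bytes remain separately addressable:
 *
 *	rdp->cpu_no_qs.b.norm = true;	// normal GP still needs a QS
 *	if (rdp->cpu_no_qs.s)		// any kind of QS still needed?
 *		do_something();		// hypothetical helper
 */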

/*
 * Record the snapshot of the core stats at half of the first RCU stall timeout.
 * The member gp_seq is used to ensure that all members are updated only once
 * during the sampling period. The snapshot is taken only if this gp_seq is not
 * equal to rdp->gp_seq.
 */
struct rcu_snap_record {
	unsigned long	gp_seq;		/* Track rdp->gp_seq counter */
	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
	u64		cputime_system; /* Accumulated cputime of kernel tasks */
	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
	unsigned long long nr_csw;	/* Accumulated number of task switches */
	unsigned long	jiffies;	/* Track jiffies value */
};
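
/*
 * Illustrative sketch of the once-per-period rule described above
 * (assumed usage, not a quote of the stall-warning code):
 *
 *	struct rcu_snap_record *rsrp = &rdp->snap_record;
 *
 *	if (rsrp->gp_seq != rdp->gp_seq) {
 *		// ... sample cputimes, irq/softirq/csw counts, jiffies ...
 *		rsrp->gp_seq = rdp->gp_seq;	// mark this period sampled
 *	}
 */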

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy. */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/* ticks this CPU has handled */
					/* during and after the last grace */
					/* period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/* different callbacks waiting for */
					/* different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int  watching_snap;		/* Per-GP tracking for dynticks. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;
	int exp_watching_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /* spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/* If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;
	unsigned long rcuc_activity;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_state;		/* ->gp_state at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_state;		/* ->gp_state at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */
	struct rcu_snap_record snap_record; /* Snapshot of core stats at half of */
					    /* the first RCU stall timeout */

	long lazy_len;			/* Length of buffered lazy callbacks. */
	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE_LAZY	2
#define RCU_NOCB_WAKE		3
#define RCU_NOCB_WAKE_FORCE	4

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/* jiffies_till_next_fqs. */
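
/*
 * Worked values for the expression above: 1 jiffy for HZ <= 250,
 * 2 jiffies for 250 < HZ <= 500, and 3 jiffies for HZ > 500.
 */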

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/* delay between bouts of */
					/* quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/* at least one scheduling clock */
					/* irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
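
/*
 * Hypothetical usage sketch (the flag is made up for illustration):
 * the condition is re-evaluated after every wakeup, so spurious
 * wakeups are harmless:
 *
 *	rcu_wait(READ_ONCE(my_kthread_should_run));
 */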

/*
 * The maximum number of synchronize_rcu() users that rcu_gp_kthread()
 * awakens directly; the remainder are deferred to the main worker.
 */
#define SR_MAX_USERS_WAKE_FROM_GP 5
#define SR_NORMAL_GP_WAIT_HEAD_MAX 5

struct sr_wait_node {
	atomic_t inuse;
	struct llist_node node;
};

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
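/*
 * Worked example, assuming a three-level tree with a fanout of 16 at
 * every level (one possible geometry, good for up to 4096 CPUs):
 * ->level[0] points to ->node[0] (the root), ->level[1] points to
 * ->node[1] (sixteen second-level nodes in ->node[1] through ->node[16]),
 * and ->level[2] points to ->node[17] (256 leaf nodes in ->node[17]
 * through ->node[272]).
 */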
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/* shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
						/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/* jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;		/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/* rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^ next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/* force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/* kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/* force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/* but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/* in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/* activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/* in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/* for CPU stalls. */
	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
						 * which read jiffies and set
						 * jiffies_stall. Stall
						 * warnings disabled if !0. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/* a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/* GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/* GP pre-initialization. */

	/* synchronize_rcu() part. */
	struct llist_head srs_next;	/* Users requesting a GP. */
	struct llist_node *srs_wait_tail; /* Users waiting for a GP. */
	struct llist_node *srs_done_tail; /* Users whose GP is complete. */
	struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
	struct work_struct srs_cleanup_work;
	atomic_t srs_cleanups_pending;	/* srs inflight worker cleanups. */

#ifdef CONFIG_RCU_NOCB_CPU
	struct mutex nocb_mutex;	/* Guards (de-)offloading. */
	int nocb_is_setup;		/* nocb is set up from boot. */
#endif
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT      4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the userspace
 * tracing tools to resolve the string address to the matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy);
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy);
static void __maybe_unused __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
						unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
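
/*
 * Typical pairing (illustrative sketch): rcu_nocb_unlock_irqrestore(),
 * declared above, undoes this, dropping ->nocb_lock only if it was
 * actually acquired:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	// ... manipulate rdp->cblist ...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 */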

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Forward declarations for tree_exp.h. */
static void sync_rcu_do_polled_gp(struct work_struct *wp);