/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct work_struct rew_work;
};
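
/*
 * Illustrative only (editor's sketch, not part of the original header):
 * a workqueue handler recovers its arguments from the embedded
 * work_struct via container_of(), along these lines:
 *
 *	static void example_exp_handler(struct work_struct *wp)
 *	{
 *		struct rcu_exp_work *rewp =
 *			container_of(wp, struct rcu_exp_work, rew_work);
 *
 *		do_something_with(rewp->rew_s);
 *	}
 *
 * example_exp_handler() and do_something_with() are placeholder names,
 * not functions defined elsewhere in RCU.
 */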

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED	0
#define RCU_KTHREAD_RUNNING	1
#define RCU_KTHREAD_WAITING	2
#define RCU_KTHREAD_OFFCPU	3
#define RCU_KTHREAD_YIELDING	4
#define RCU_KTHREAD_MAX		4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to */
				/*  proceed. In leaf rcu_node, each bit */
				/*  corresponds to an rcu_data structure; */
				/*  otherwise, each bit corresponds to a */
				/*  child rcu_node structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
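
/*
 * Illustrative sketch (editor's example, not quoted from tree.c):
 * testing whether a given CPU still owes the current grace period a
 * quiescent state on its leaf rcu_node might look like this, with
 * rnp->lock held:
 *
 *	if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
 *		... CPU has not yet reported a quiescent state ...
 *	}
 */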

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
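
/*
 * Illustrative sketch (assumed usage pattern, not quoted from tree.c):
 * the union lets one load check both needs at once, while stores update
 * each need individually:
 *
 *	rdp->cpu_no_qs.b.norm = true;	// normal GP still needs a QS
 *	rdp->cpu_no_qs.b.exp = false;	// expedited GP is satisfied
 *	if (rdp->cpu_no_qs.s)		// does either still need a QS?
 *		...
 */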

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling: */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		exp_deferred_qs; /* This CPU awaiting a deferred QS? */
	struct rcu_node	*mynode;	/* This CPU's leaf of hierarchy. */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work	defer_qs_iw;	/* Obtain later scheduler attention. */
	bool		defer_qs_iw_pending; /* Scheduler attention pending? */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch. */

	/* 3) dynticks interface. */
	int		dynticks_snap;	/* Per-GP tracking for dynticks. */
	long		dynticks_nesting; /* Track process nesting level. */
	long		dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
	atomic_t	dynticks;	/* Even value for idle, else odd. */
	bool		rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
	bool		rcu_urgent_qs;	/* GP old, need light quiescent state. */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool		all_lazy;	/* All this CPU's CBs lazy at idle start? */
	unsigned long	last_accelerate; /* Last jiffy CBs were accelerated. */
	unsigned long	last_advance_all; /* Last jiffy CBs were all advanced. */
	int		tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct rcu_data *nocb_next_cb_rdp;
					/* Next rcu_data in wakeup chain. */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */

	int cpu;
};
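
/*
 * Illustrative only: tree.c defines the per-CPU instance with
 * DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data), so code
 * typically reaches its own copy along these lines:
 *
 *	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 *
 * or a given CPU's copy via per_cpu_ptr(&rcu_data, cpu).
 */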

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE		1
#define RCU_NOCB_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
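
/*
 * Worked example (editorial; the values follow directly from the formula
 * above): each (HZ > n) comparison contributes 0 or 1, so
 *
 *	HZ = 100:  1 + 0 + 0 = 1 jiffy   (10 ms)
 *	HZ = 250:  1 + 0 + 0 = 1 jiffy   (4 ms)
 *	HZ = 300:  1 + 1 + 0 = 2 jiffies (~6.7 ms)
 *	HZ = 1000: 1 + 1 + 1 = 3 jiffies (3 ms)
 *
 * keeping the default FQS delay at a few milliseconds regardless of HZ.
 */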

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
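
/*
 * Illustrative only: the RCU boost kthread in tree_plugin.h blocks along
 * the lines of
 *
 *	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 *
 * The condition is re-evaluated after every wakeup, so spurious wakeups
 * are harmless.
 */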

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gp_seq;			/* Grace-period sequence #. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
};
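
/*
 * Because the hierarchy lives in a single dense array, breadth-first
 * traversal is plain pointer arithmetic.  The helper in kernel/rcu/rcu.h
 * works along these lines (editor's sketch, not quoted verbatim):
 *
 *	#define rcu_for_each_node_breadth_first(rnp)			\
 *		for ((rnp) = &rcu_state.node[0];			\
 *		     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
 *
 * with rcu_num_nodes being the number of rcu_node structures actually
 * in use for the current number of CPUs.
 */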

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS	 1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS	 2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF	 3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT	 4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS	 5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 8	/* Grace-period cleanup complete. */

static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_ONOFF",
	"RCU_GP_INIT",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
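
/*
 * Illustrative sketch: tree_stall.h turns a ->gp_state value back into
 * its name for stall-warning output with a bounds-checked lookup along
 * these lines:
 *
 *	static const char *gp_state_getname(short gs)
 *	{
 *		if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
 *			return "???";
 *		return gp_state_names[gs];
 *	}
 */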

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added to the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows userspace
 * tracing tools to map the string address back to the matching
 * string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

int rcu_dynticks_snap(struct rcu_data *rdp);

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);
#define rcu_nocb_lock_irqsave(rdp, flags)				\
do {									\
	if (!rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		local_irq_save(flags);					\
	else								\
		raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags));	\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
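
/*
 * Illustrative pairing (editor's sketch): callers bracket ->cblist
 * accesses so that the nocb lock is taken only when callbacks are
 * actually offloaded:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	... update rdp->cblist ...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 */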

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);