/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	(RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	(RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	(RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	(RCU_FANOUT_3 * RCU_FANOUT)

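/*
 * Worked example (illustrative only, not a definition): with
 * RCU_FANOUT = 64 and RCU_FANOUT_LEAF = 64 (the 64-bit defaults above),
 * the per-depth capacities are RCU_FANOUT_1 = 64, RCU_FANOUT_2 = 4096,
 * RCU_FANOUT_3 = 262144, and RCU_FANOUT_4 = 16777216.  A kernel built
 * with NR_CPUS = 4096 therefore falls into the NR_CPUS <= RCU_FANOUT_2
 * case below: a two-level tree with one root rcu_node and
 * DIV_ROUND_UP(4096, 64) = 64 leaf rcu_node structures.
 */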
#if NR_CPUS <= RCU_FANOUT_1
# define RCU_NUM_LVLS	      1
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_NODES	      NUM_RCU_LVL_0
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0" }
# define RCU_EXP_NAME_INIT    { "rcu_node_exp_0" }
#elif NR_CPUS <= RCU_FANOUT_2
# define RCU_NUM_LVLS	      2
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0", "rcu_node_1" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0", "rcu_node_fqs_1" }
# define RCU_EXP_NAME_INIT    { "rcu_node_exp_0", "rcu_node_exp_1" }
#elif NR_CPUS <= RCU_FANOUT_3
# define RCU_NUM_LVLS	      3
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
# define RCU_EXP_NAME_INIT    { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
#elif NR_CPUS <= RCU_FANOUT_4
# define RCU_NUM_LVLS	      4
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
# define RCU_EXP_NAME_INIT    { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/*  Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
					/* irq/process nesting level from idle. */
	atomic_t dynticks_idle;		/* Even value for idle, else odd. */
					/*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
					/* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
					/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
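
/*
 * Illustrative sketch (an assumption for exposition, not kernel API):
 * because ->dynticks is even exactly when the CPU is in an extended
 * quiescent state, grace-period detection can snapshot the counter
 * with full ordering and test the low-order bit.  The rdtp variable
 * name below is hypothetical:
 *
 *	int snap = atomic_add_return(0, &rdtp->dynticks);
 *	bool in_eqs = !(snap & 0x1);	// even => idle from RCU's viewpoint
 */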

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed. */
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;
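
/*
 * Illustrative sketch (assumption for exposition): within a leaf
 * rcu_node, each CPU owns one bit of ->qsmask, derived from its
 * position in the node's [->grplo, ->grphi] range.  A CPU's rcu_data
 * ->grpmask is set up accordingly at initialization time, roughly:
 *
 *	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 */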

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

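/*
 * Usage sketch (illustrative, not taken from this file): walk every
 * leaf rcu_node of a flavor and report which CPUs still owe the
 * current grace period a quiescent state; each set bit of ->qsmask
 * maps to the CPU numbered rnp->grplo + bit_position.
 *
 *	struct rcu_node *rnp;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		pr_info("leaf %d-%d qsmask %#lx\n",
 *			rnp->grplo, rnp->grphi, READ_ONCE(rnp->qsmask));
 */
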
/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};

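/*
 * Illustrative sketch (assumption for exposition): because ->b.norm and
 * ->b.exp alias ->s, a single load answers "does this CPU still owe any
 * kind of quiescent state?" without testing each flavor separately:
 *
 *	union rcu_noqs nq = READ_ONCE(rdp->cpu_no_qs);
 *
 *	if (nq.s)
 *		;	// QS still needed for normal and/or expedited GP
 */
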
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	struct mutex exp_funnel_mutex;
	atomic_long_t expedited_workdone0;	/* # done by others #0. */
	atomic_long_t expedited_workdone1;	/* # done by others #1. */
	atomic_long_t expedited_workdone2;	/* # done by others #2. */
	atomic_long_t expedited_workdone3;	/* # done by others #3. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};

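/*
 * Illustrative sketch, simplified from what __call_rcu() does with the
 * segmented ->nxtlist above: a new callback is always appended through
 * the RCU_NEXT_TAIL pointer, preserving *nxttail[RCU_NEXT_TAIL] == NULL
 * as the list terminator:
 *
 *	head->func = func;
 *	head->next = NULL;
 *	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 *	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 *	rdp->qlen++;
 */
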
/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

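/*
 * Usage sketch (illustrative): a per-rcu_node kthread can block until
 * there is boosting or expedited work queued on its node, e.g.:
 *
 *	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 */
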
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

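/*
 * Worked example (illustrative): for the two-level tree from the fanout
 * example earlier (one root plus 64 leaves), ->node[] holds the root at
 * ->node[0] and the leaves at ->node[1] through ->node[64], while
 * ->level[0] == &->node[0] and ->level[1] == &->node[1].  The
 * rcu_for_each_leaf_node() iterator above is built on exactly this
 * dense layout.
 */
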
/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

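/*
 * Usage sketch (illustrative): iterate over rcu_sched, rcu_bh, and (if
 * configured) rcu_preempt, e.g. from a debug or stall-warning path:
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		pr_info("%s: gpnum %lu completed %lu\n",
 *			rsp->name, rsp->gpnum, rsp->completed);
 */
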
/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these
 * wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
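
/*
 * Usage sketch (illustrative): a typical critical section on an
 * rcu_node, relying on the wrappers above both for the __private
 * access and for the smp_mb__after_unlock_lock() ordering:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ...update ->qsmask or other rcu_node state...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */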