v4.6
  1/*
  2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  3 * Internal non-public definitions.
  4 *
  5 * This program is free software; you can redistribute it and/or modify
  6 * it under the terms of the GNU General Public License as published by
  7 * the Free Software Foundation; either version 2 of the License, or
  8 * (at your option) any later version.
  9 *
 10 * This program is distributed in the hope that it will be useful,
 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 * GNU General Public License for more details.
 14 *
 15 * You should have received a copy of the GNU General Public License
 16 * along with this program; if not, you can access it online at
 17 * http://www.gnu.org/licenses/gpl-2.0.html.
 18 *
 19 * Copyright IBM Corporation, 2008
 20 *
 21 * Author: Ingo Molnar <mingo@elte.hu>
 22 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 23 */
 24
 25#include <linux/cache.h>
 26#include <linux/spinlock.h>
 27#include <linux/threads.h>
 28#include <linux/cpumask.h>
 29#include <linux/seqlock.h>
 30#include <linux/swait.h>
 31#include <linux/stop_machine.h>
 32
 33/*
 34 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 35 * CONFIG_RCU_FANOUT_LEAF.
 36 * In theory, it should be possible to add more levels straightforwardly.
 37 * In practice, this did work well going from three levels to four.
 38 * Of course, your mileage may vary.
 39 */
 40
 41#ifdef CONFIG_RCU_FANOUT
 42#define RCU_FANOUT CONFIG_RCU_FANOUT
 43#else /* #ifdef CONFIG_RCU_FANOUT */
 44# ifdef CONFIG_64BIT
 45# define RCU_FANOUT 64
 46# else
 47# define RCU_FANOUT 32
 48# endif
 49#endif /* #else #ifdef CONFIG_RCU_FANOUT */
 50
 51#ifdef CONFIG_RCU_FANOUT_LEAF
 52#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
 53#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
 54# ifdef CONFIG_64BIT
 55# define RCU_FANOUT_LEAF 64
 56# else
 57# define RCU_FANOUT_LEAF 32
 58# endif
 59#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
 60
 61#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
 62#define RCU_FANOUT_2	      (RCU_FANOUT_1 * RCU_FANOUT)
 63#define RCU_FANOUT_3	      (RCU_FANOUT_2 * RCU_FANOUT)
 64#define RCU_FANOUT_4	      (RCU_FANOUT_3 * RCU_FANOUT)
 65
 66#if NR_CPUS <= RCU_FANOUT_1
 67#  define RCU_NUM_LVLS	      1
 68#  define NUM_RCU_LVL_0	      1
 69#  define NUM_RCU_NODES	      NUM_RCU_LVL_0
 70#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
 71#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
 72#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
 73#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
 74#elif NR_CPUS <= RCU_FANOUT_2
 75#  define RCU_NUM_LVLS	      2
 76#  define NUM_RCU_LVL_0	      1
 77#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 78#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
 79#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
 80#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
 81#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
 82#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
 83#elif NR_CPUS <= RCU_FANOUT_3
 84#  define RCU_NUM_LVLS	      3
 85#  define NUM_RCU_LVL_0	      1
 86#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 87#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 88#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
 89#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
 90#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
 91#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
 92#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
 93#elif NR_CPUS <= RCU_FANOUT_4
 94#  define RCU_NUM_LVLS	      4
 95#  define NUM_RCU_LVL_0	      1
 96#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
 97#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 98#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 99#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
100#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
101#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
102#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
103#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
104#else
105# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
106#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
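/*
 * Editor's note -- an illustrative, user-space sketch (not part of this
 * header) that redoes the arithmetic of the #if ladder above.  The
 * configuration values are assumptions chosen for the example:
 * RCU_FANOUT = 64, RCU_FANOUT_LEAF = 16, NR_CPUS = 4096.
 */
#include <stdio.h>

int main(void)
{
	const int fanout = 64, leaf = 16, nr_cpus = 4096;
	const int lvl1 = leaf;		/* RCU_FANOUT_1:    16 */
	const int lvl2 = lvl1 * fanout;	/* RCU_FANOUT_2:  1024 */
	const int lvl3 = lvl2 * fanout;	/* RCU_FANOUT_3: 65536 */

	if (nr_cpus <= lvl1) {
		printf("1 level, 1 rcu_node\n");
	} else if (nr_cpus <= lvl2) {
		printf("2 levels, %d rcu_nodes\n",
		       1 + (nr_cpus + lvl1 - 1) / lvl1);
	} else if (nr_cpus <= lvl3) {
		/* 4096 CPUs: 1 root + 4 interior + 256 leaves = 261. */
		printf("3 levels, %d rcu_nodes\n",
		       1 + (nr_cpus + lvl2 - 1) / lvl2
			 + (nr_cpus + lvl1 - 1) / lvl1);
	}
	return 0;
}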
107
108extern int rcu_num_lvls;
109extern int rcu_num_nodes;
110
111/*
112 * Dynticks per-CPU state.
113 */
114struct rcu_dynticks {
115	long long dynticks_nesting; /* Track irq/process nesting level. */
116				    /* Process level is worth LLONG_MAX/2. */
117	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
118	atomic_t dynticks;	    /* Even value for idle, else odd. */
119#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
120	long long dynticks_idle_nesting;
121				    /* irq/process nesting level from idle. */
122	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
123				    /*  "Idle" excludes userspace execution. */
124	unsigned long dynticks_idle_jiffies;
125				    /* End of last non-NMI non-idle period. */
126#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
127#ifdef CONFIG_RCU_FAST_NO_HZ
 128	bool all_lazy;		    /* Are all of this CPU's CBs lazy? */
129	unsigned long nonlazy_posted;
130				    /* # times non-lazy CBs posted to CPU. */
131	unsigned long nonlazy_posted_snap;
132				    /* idle-period nonlazy_posted snapshot. */
133	unsigned long last_accelerate;
134				    /* Last jiffy CBs were accelerated. */
135	unsigned long last_advance_all;
136				    /* Last jiffy CBs were all advanced. */
137	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
138#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
139};
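/*
 * Editor's note -- a minimal user-space sketch (names invented, not
 * kernel code) of the ->dynticks even/odd protocol above: the counter
 * starts even (idle) and is incremented on every idle/non-idle
 * transition, so a sampled even value means "idle right now", and any
 * change since a snapshot means the CPU passed through idle; either way,
 * an RCU quiescent state.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int demo_dynticks;		/* Even: idle, else odd. */

static void demo_idle_exit(void)  { atomic_fetch_add(&demo_dynticks, 1); }
static void demo_idle_enter(void) { atomic_fetch_add(&demo_dynticks, 1); }

/* Grace-period side: snapshot now, decide later. */
static int demo_dynticks_snap(void)
{
	return atomic_load(&demo_dynticks);
}

static bool demo_in_or_passed_quiescent(int snap)
{
	return !(snap & 1) || atomic_load(&demo_dynticks) != snap;
}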
140
141/* RCU's kthread states for tracing. */
142#define RCU_KTHREAD_STOPPED  0
143#define RCU_KTHREAD_RUNNING  1
144#define RCU_KTHREAD_WAITING  2
145#define RCU_KTHREAD_OFFCPU   3
146#define RCU_KTHREAD_YIELDING 4
147#define RCU_KTHREAD_MAX      4
148
149/*
150 * Definition for node within the RCU grace-period-detection hierarchy.
151 */
152struct rcu_node {
153	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
154					/*  some rcu_state fields as well as */
155					/*  following. */
156	unsigned long gpnum;	/* Current grace period for this node. */
157				/*  This will either be equal to or one */
158				/*  behind the root rcu_node's gpnum. */
159	unsigned long completed; /* Last GP completed for this node. */
160				/*  This will either be equal to or one */
 161				/*  behind the root rcu_node's completed. */
162	unsigned long qsmask;	/* CPUs or groups that need to switch in */
163				/*  order for current grace period to proceed.*/
164				/*  In leaf rcu_node, each bit corresponds to */
165				/*  an rcu_data structure, otherwise, each */
166				/*  bit corresponds to a child rcu_node */
167				/*  structure. */
168	unsigned long qsmaskinit;
169				/* Per-GP initial value for qsmask. */
170				/*  Initialized from ->qsmaskinitnext at the */
171				/*  beginning of each grace period. */
172	unsigned long qsmaskinitnext;
173				/* Online CPUs for next grace period. */
174	unsigned long expmask;	/* CPUs or groups that need to check in */
175				/*  to allow the current expedited GP */
176				/*  to complete. */
177	unsigned long expmaskinit;
178				/* Per-GP initial values for expmask. */
179				/*  Initialized from ->expmaskinitnext at the */
180				/*  beginning of each expedited GP. */
181	unsigned long expmaskinitnext;
182				/* Online CPUs for next expedited GP. */
183				/*  Any CPU that has ever been online will */
184				/*  have its bit set. */
185	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
186				/*  Only one bit will be set in this mask. */
187	int	grplo;		/* lowest-numbered CPU or group here. */
188	int	grphi;		/* highest-numbered CPU or group here. */
189	u8	grpnum;		/* CPU/group number for next level up. */
190	u8	level;		/* root is at level 0. */
191	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
192				/*  exit RCU read-side critical sections */
193				/*  before propagating offline up the */
194				/*  rcu_node tree? */
195	struct rcu_node *parent;
196	struct list_head blkd_tasks;
197				/* Tasks blocked in RCU read-side critical */
198				/*  section.  Tasks are placed at the head */
199				/*  of this list and age towards the tail. */
200	struct list_head *gp_tasks;
201				/* Pointer to the first task blocking the */
202				/*  current grace period, or NULL if there */
203				/*  is no such task. */
204	struct list_head *exp_tasks;
205				/* Pointer to the first task blocking the */
206				/*  current expedited grace period, or NULL */
207				/*  if there is no such task.  If there */
208				/*  is no current expedited grace period, */
 209				/*  then there cannot be any such task. */
210	struct list_head *boost_tasks;
211				/* Pointer to first task that needs to be */
212				/*  priority boosted, or NULL if no priority */
213				/*  boosting is needed for this rcu_node */
214				/*  structure.  If there are no tasks */
215				/*  queued on this rcu_node structure that */
216				/*  are blocking the current grace period, */
217				/*  there can be no such task. */
218	struct rt_mutex boost_mtx;
219				/* Used only for the priority-boosting */
220				/*  side effect, not as a lock. */
221	unsigned long boost_time;
222				/* When to start boosting (jiffies). */
223	struct task_struct *boost_kthread_task;
224				/* kthread that takes care of priority */
225				/*  boosting for this rcu_node structure. */
226	unsigned int boost_kthread_status;
227				/* State of boost_kthread_task for tracing. */
228	unsigned long n_tasks_boosted;
229				/* Total number of tasks boosted. */
230	unsigned long n_exp_boosts;
231				/* Number of tasks boosted for expedited GP. */
232	unsigned long n_normal_boosts;
233				/* Number of tasks boosted for normal GP. */
234	unsigned long n_balk_blkd_tasks;
235				/* Refused to boost: no blocked tasks. */
236	unsigned long n_balk_exp_gp_tasks;
237				/* Refused to boost: nothing blocking GP. */
238	unsigned long n_balk_boost_tasks;
239				/* Refused to boost: already boosting. */
240	unsigned long n_balk_notblocked;
241				/* Refused to boost: RCU RS CS still running. */
242	unsigned long n_balk_notyet;
243				/* Refused to boost: not yet time. */
244	unsigned long n_balk_nos;
245				/* Refused to boost: not sure why, though. */
246				/*  This can happen due to race conditions. */
247#ifdef CONFIG_RCU_NOCB_CPU
248	struct swait_queue_head nocb_gp_wq[2];
249				/* Place for rcu_nocb_kthread() to wait GP. */
250#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
251	int need_future_gp[2];
252				/* Counts of upcoming no-CB GP requests. */
253	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
254
255	struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
256} ____cacheline_internodealigned_in_smp;
257
258/*
259 * Do a full breadth-first scan of the rcu_node structures for the
260 * specified rcu_state structure.
261 */
262#define rcu_for_each_node_breadth_first(rsp, rnp) \
263	for ((rnp) = &(rsp)->node[0]; \
264	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
265
266/*
267 * Do a breadth-first scan of the non-leaf rcu_node structures for the
268 * specified rcu_state structure.  Note that if there is a singleton
269 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
270 */
271#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
272	for ((rnp) = &(rsp)->node[0]; \
273	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
274
275/*
276 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
277 * structure.  Note that if there is a singleton rcu_node tree with but
278 * one rcu_node structure, this loop -will- visit the rcu_node structure.
279 * It is still a leaf node, even if it is also the root node.
280 */
281#define rcu_for_each_leaf_node(rsp, rnp) \
282	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
283	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
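/*
 * Editor's note -- an illustrative (invented) caller of the scans above;
 * it assumes the struct rcu_state definition that appears later in this
 * file.  Breadth-first order matters elsewhere because it visits every
 * parent before any of its children.
 */
static int demo_count_possible_cpus(struct rcu_state *rsp)
{
	struct rcu_node *rnp;
	int cpus = 0;

	rcu_for_each_leaf_node(rsp, rnp)
		cpus += rnp->grphi - rnp->grplo + 1;	/* leaf's CPU span */
	return cpus;
}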
284
285/*
286 * Union to allow "aggregate OR" operation on the need for a quiescent
287 * state by the normal and expedited grace periods.
288 */
289union rcu_noqs {
290	struct {
291		u8 norm;
292		u8 exp;
293	} b; /* Bits. */
294	u16 s; /* Set of bits, aggregate OR here. */
295};
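/*
 * Editor's note -- illustrative only: the union above makes "does this
 * CPU still owe any quiescent state?" a single 16-bit test, while the
 * normal and expedited components remain individually addressable.
 */
static inline bool demo_cpu_owes_qs(union rcu_noqs rnqs)
{
	return rnqs.s != 0;	/* aggregate OR of .b.norm and .b.exp */
}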
296
297/* Index values for nxttail array in struct rcu_data. */
298#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
299#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
300#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
301#define RCU_NEXT_TAIL		3
302#define RCU_NEXT_SIZE		4
303
304/* Per-CPU data for read-copy update. */
305struct rcu_data {
306	/* 1) quiescent-state and grace-period handling : */
307	unsigned long	completed;	/* Track rsp->completed gp number */
308					/*  in order to detect GP end. */
309	unsigned long	gpnum;		/* Highest gp number that this CPU */
310					/*  is aware of having started. */
311	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
312					/*  for rcu_all_qs() invocations. */
313	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
 314	bool		core_needs_qs;	/* Core waits for quiescent state. */
315	bool		beenonline;	/* CPU online at least once. */
316	bool		gpwrap;		/* Possible gpnum/completed wrap. */
317	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
318	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
319	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
320					/*  ticks this CPU has handled */
321					/*  during and after the last grace */
322					/* period it is aware of. */
323
324	/* 2) batch handling */
325	/*
326	 * If nxtlist is not NULL, it is partitioned as follows.
327	 * Any of the partitions might be empty, in which case the
328	 * pointer to that partition will be equal to the pointer for
329	 * the following partition.  When the list is empty, all of
330	 * the nxttail elements point to the ->nxtlist pointer itself,
331	 * which in that case is NULL.
332	 *
333	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
334	 *	Entries that batch # <= ->completed
335	 *	The grace period for these entries has completed, and
336	 *	the other grace-period-completed entries may be moved
337	 *	here temporarily in rcu_process_callbacks().
338	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
339	 *	Entries that batch # <= ->completed - 1: waiting for current GP
340	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
341	 *	Entries known to have arrived before current GP ended
342	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
343	 *	Entries that might have arrived after current GP ended
344	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
345	 *	always be NULL, as this is the end of the list.
346	 */
347	struct rcu_head *nxtlist;
348	struct rcu_head **nxttail[RCU_NEXT_SIZE];
349	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
350					/* grace periods for sublists. */
351	long		qlen_lazy;	/* # of lazy queued callbacks */
352	long		qlen;		/* # of queued callbacks, incl lazy */
353	long		qlen_last_fqs_check;
354					/* qlen at last check for QS forcing */
355	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
356	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
357	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
358	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
359	unsigned long	n_force_qs_snap;
 360					/* Did another CPU force QS recently? */
361	long		blimit;		/* Upper limit on a processed batch */
362
363	/* 3) dynticks interface. */
364	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
365	int dynticks_snap;		/* Per-GP tracking for dynticks. */
366
367	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
368	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
369	unsigned long offline_fqs;	/* Kicked due to being offline. */
370	unsigned long cond_resched_completed;
371					/* Grace period that needs help */
372					/*  from cond_resched(). */
373
374	/* 5) __rcu_pending() statistics. */
375	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
376	unsigned long n_rp_core_needs_qs;
377	unsigned long n_rp_report_qs;
378	unsigned long n_rp_cb_ready;
379	unsigned long n_rp_cpu_needs_gp;
380	unsigned long n_rp_gp_completed;
381	unsigned long n_rp_gp_started;
382	unsigned long n_rp_nocb_defer_wakeup;
383	unsigned long n_rp_need_nothing;
384
385	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
386	struct rcu_head barrier_head;
387#ifdef CONFIG_RCU_FAST_NO_HZ
388	struct rcu_head oom_head;
389#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
390	struct mutex exp_funnel_mutex;
391	atomic_long_t expedited_workdone0;	/* # done by others #0. */
392	atomic_long_t expedited_workdone1;	/* # done by others #1. */
393	atomic_long_t expedited_workdone2;	/* # done by others #2. */
394	atomic_long_t expedited_workdone3;	/* # done by others #3. */
395
396	/* 7) Callback offloading. */
397#ifdef CONFIG_RCU_NOCB_CPU
398	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
399	struct rcu_head **nocb_tail;
400	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
401	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
402	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
403	struct rcu_head **nocb_follower_tail;
404	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
405	struct task_struct *nocb_kthread;
406	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
407
408	/* The following fields are used by the leader, hence own cacheline. */
409	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
410					/* CBs waiting for GP. */
411	struct rcu_head **nocb_gp_tail;
412	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
413	struct rcu_data *nocb_next_follower;
414					/* Next follower in wakeup chain. */
415
 416	/* The following fields are used by the follower, hence new cacheline. */
417	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
418					/* Leader CPU takes GP-end wakeups. */
419#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
420
421	/* 8) RCU CPU stall data. */
422	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
423
424	int cpu;
425	struct rcu_state *rsp;
426};
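/*
 * Editor's note -- a compressed user-space sketch (all names invented) of
 * the ->nxtlist/->nxttail scheme documented in the struct above: each
 * tail element points at the ->next pointer that ends its sublist, so an
 * empty sublist is simply two adjacent tails aimed at the same location.
 */
#include <stddef.h>

struct demo_cb {
	struct demo_cb *next;
};

struct demo_cblist {
	struct demo_cb *head;		/* plays the role of ->nxtlist */
	struct demo_cb **tails[4];	/* plays the role of ->nxttail[] */
};

static void demo_cblist_init(struct demo_cblist *cl)
{
	int i;

	cl->head = NULL;
	for (i = 0; i < 4; i++)
		cl->tails[i] = &cl->head;	/* every sublist empty */
}

/* Newly arrived callbacks always go into the final ("next") sublist. */
static void demo_cblist_enqueue(struct demo_cblist *cl, struct demo_cb *cb)
{
	cb->next = NULL;
	*cl->tails[3] = cb;
	cl->tails[3] = &cb->next;
}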
427
428/* Values for nocb_defer_wakeup field in struct rcu_data. */
429#define RCU_NOGP_WAKE_NOT	0
430#define RCU_NOGP_WAKE		1
431#define RCU_NOGP_WAKE_FORCE	2
432
433#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
434					/* For jiffies_till_first_fqs and */
 435					/*  jiffies_till_next_fqs. */
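/*
 * Editor's note -- worked example: at HZ=1000 both comparisons above are
 * true, giving 1 + 1 + 1 = 3 jiffies (3 ms) between bouts of forcing; at
 * HZ=100 both are false, giving a single jiffy (10 ms).
 */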
436
437#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
438					/*  delay between bouts of */
439					/*  quiescent-state forcing. */
440
441#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
442					/*  at least one scheduling clock */
443					/*  irq before ratting on them. */
444
445#define rcu_wait(cond)							\
446do {									\
447	for (;;) {							\
448		set_current_state(TASK_INTERRUPTIBLE);			\
449		if (cond)						\
450			break;						\
451		schedule();						\
452	}								\
453	__set_current_state(TASK_RUNNING);				\
454} while (0)
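/*
 * Editor's note -- illustrative use of rcu_wait(); the condition mirrors
 * the one the boost kthread in rcutree_plugin.h waits on.  Spurious
 * wakeups are harmless because the condition is re-tested after every
 * schedule().
 */
static void demo_wait_for_boost_work(struct rcu_node *rnp)
{
	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
}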
455
456/*
457 * RCU global state, including node hierarchy.  This hierarchy is
458 * represented in "heap" form in a dense array.  The root (first level)
459 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
460 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
461 * and the third level in ->node[m+1] and following (->node[m+1] referenced
462 * by ->level[2]).  The number of levels is determined by the number of
463 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
464 * consisting of a single rcu_node.
465 */
466struct rcu_state {
467	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
468	struct rcu_node *level[RCU_NUM_LVLS + 1];
469						/* Hierarchy levels (+1 to */
470						/*  shut bogus gcc warning) */
471	u8 flavor_mask;				/* bit in flavor mask. */
 472	struct rcu_data __percpu *rda;		/* Pointer to per-CPU rcu_data. */
473	call_rcu_func_t call;			/* call_rcu() flavor. */
474	int ncpus;				/* # CPUs seen so far. */
475
476	/* The following fields are guarded by the root rcu_node's lock. */
477
478	u8	boost ____cacheline_internodealigned_in_smp;
479						/* Subject to priority boost. */
480	unsigned long gpnum;			/* Current gp number. */
481	unsigned long completed;		/* # of last completed gp. */
482	struct task_struct *gp_kthread;		/* Task for grace periods. */
483	struct swait_queue_head gp_wq;		/* Where GP task waits. */
484	short gp_flags;				/* Commands for GP task. */
485	short gp_state;				/* GP kthread sleep state. */
486
487	/* End of fields guarded by root rcu_node's lock. */
488
489	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
490						/* Protect following fields. */
491	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
492						/*  need a grace period. */
493	struct rcu_head **orphan_nxttail;	/* Tail of above. */
494	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
495						/*  are ready to invoke. */
496	struct rcu_head **orphan_donetail;	/* Tail of above. */
497	long qlen_lazy;				/* Number of lazy callbacks. */
498	long qlen;				/* Total number of callbacks. */
499	/* End of fields guarded by orphan_lock. */
500
501	struct mutex barrier_mutex;		/* Guards barrier fields. */
502	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
503	struct completion barrier_completion;	/* Wake at barrier end. */
504	unsigned long barrier_sequence;		/* ++ at start and end of */
505						/*  _rcu_barrier(). */
506	/* End of fields guarded by barrier_mutex. */
507
508	unsigned long expedited_sequence;	/* Take a ticket. */
509	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
510	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
511	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
512	int ncpus_snap;				/* # CPUs seen last time. */
513
514	unsigned long jiffies_force_qs;		/* Time at which to invoke */
515						/*  force_quiescent_state(). */
516	unsigned long n_force_qs;		/* Number of calls to */
517						/*  force_quiescent_state(). */
518	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
519						/*  due to lock unavailable. */
520	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
521						/*  due to no GP active. */
522	unsigned long gp_start;			/* Time at which GP started, */
523						/*  but in jiffies. */
524	unsigned long gp_activity;		/* Time of last GP kthread */
525						/*  activity in jiffies. */
526	unsigned long jiffies_stall;		/* Time at which to check */
527						/*  for CPU stalls. */
528	unsigned long jiffies_resched;		/* Time at which to resched */
529						/*  a reluctant CPU. */
530	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
531						/*  GP start. */
532	unsigned long gp_max;			/* Maximum GP duration in */
533						/*  jiffies. */
534	const char *name;			/* Name of structure. */
535	char abbr;				/* Abbreviated name. */
536	struct list_head flavors;		/* List of RCU flavors. */
537};
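/*
 * Editor's note -- illustrative only: for a hypothetical two-level tree
 * with one root and three leaves, the dense ->node[] array and the
 * ->level[] pointers relate as follows:
 *
 *	->node[0]		root	->level[0] == &->node[0]
 *	->node[1]..->node[3]	leaves	->level[1] == &->node[1]
 *
 * This is why rcu_for_each_leaf_node() can start at
 * ->level[rcu_num_lvls - 1] and stop at &->node[rcu_num_nodes], and why
 * tree.c can fetch the root as below.
 */
static inline struct rcu_node *demo_rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];	/* ->level[0] always points here */
}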
538
539/* Values for rcu_state structure's gp_flags field. */
540#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
541#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
542
543/* Values for rcu_state structure's gp_state field. */
544#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
545#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
546#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
547#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
548#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
549#define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
550#define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */
551
552#ifndef RCU_TREE_NONCORE
553static const char * const gp_state_names[] = {
554	"RCU_GP_IDLE",
555	"RCU_GP_WAIT_GPS",
556	"RCU_GP_DONE_GPS",
557	"RCU_GP_WAIT_FQS",
558	"RCU_GP_DOING_FQS",
559	"RCU_GP_CLEANUP",
560	"RCU_GP_CLEANED",
561};
562#endif /* #ifndef RCU_TREE_NONCORE */
563
564extern struct list_head rcu_struct_flavors;
565
566/* Sequence through rcu_state structures for each RCU flavor. */
567#define for_each_rcu_flavor(rsp) \
568	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
569
570/*
571 * RCU implementation internal declarations:
572 */
573extern struct rcu_state rcu_sched_state;
574
575extern struct rcu_state rcu_bh_state;
576
577#ifdef CONFIG_PREEMPT_RCU
578extern struct rcu_state rcu_preempt_state;
579#endif /* #ifdef CONFIG_PREEMPT_RCU */
580
581#ifdef CONFIG_RCU_BOOST
582DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
583DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
584DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
585DECLARE_PER_CPU(char, rcu_cpu_has_work);
586#endif /* #ifdef CONFIG_RCU_BOOST */
587
588#ifndef RCU_TREE_NONCORE
589
590/* Forward declarations for rcutree_plugin.h */
591static void rcu_bootup_announce(void);
592static void rcu_preempt_note_context_switch(void);
593static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
594#ifdef CONFIG_HOTPLUG_CPU
595static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
596#endif /* #ifdef CONFIG_HOTPLUG_CPU */
597static void rcu_print_detail_task_stall(struct rcu_state *rsp);
598static int rcu_print_task_stall(struct rcu_node *rnp);
599static int rcu_print_task_exp_stall(struct rcu_node *rnp);
600static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
601static void rcu_preempt_check_callbacks(void);
602void call_rcu(struct rcu_head *head, rcu_callback_t func);
603static void __init __rcu_init_preempt(void);
604static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
605static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
606static void invoke_rcu_callbacks_kthread(void);
607static bool rcu_is_callbacks_kthread(void);
608#ifdef CONFIG_RCU_BOOST
609static void rcu_preempt_do_callbacks(void);
610static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
611						 struct rcu_node *rnp);
612#endif /* #ifdef CONFIG_RCU_BOOST */
613static void __init rcu_spawn_boost_kthreads(void);
614static void rcu_prepare_kthreads(int cpu);
615static void rcu_cleanup_after_idle(void);
616static void rcu_prepare_for_idle(void);
617static void rcu_idle_count_callbacks_posted(void);
618static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
619static void print_cpu_stall_info_begin(void);
620static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
621static void print_cpu_stall_info_end(void);
622static void zero_cpu_stall_ticks(struct rcu_data *rdp);
623static void increment_cpu_stall_ticks(void);
624static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
625static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
626static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
627static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
628static void rcu_init_one_nocb(struct rcu_node *rnp);
629static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
630			    bool lazy, unsigned long flags);
631static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
632				      struct rcu_data *rdp,
633				      unsigned long flags);
634static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
635static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
636static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
637static void rcu_spawn_all_nocb_kthreads(int cpu);
638static void __init rcu_spawn_nocb_kthreads(void);
639#ifdef CONFIG_RCU_NOCB_CPU
640static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
641#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
642static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
643static bool init_nocb_callback_list(struct rcu_data *rdp);
644static void rcu_sysidle_enter(int irq);
645static void rcu_sysidle_exit(int irq);
646static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
647				  unsigned long *maxj);
648static bool is_sysidle_rcu_state(struct rcu_state *rsp);
649static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
650				  unsigned long maxj);
651static void rcu_bind_gp_kthread(void);
652static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
653static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
654static void rcu_dynticks_task_enter(void);
655static void rcu_dynticks_task_exit(void);
656
657#endif /* #ifndef RCU_TREE_NONCORE */
658
659#ifdef CONFIG_RCU_TRACE
660/* Read out queue lengths for tracing. */
661static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
662{
663#ifdef CONFIG_RCU_NOCB_CPU
664	*ql = atomic_long_read(&rdp->nocb_q_count);
665	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
666#else /* #ifdef CONFIG_RCU_NOCB_CPU */
667	*ql = 0;
668	*qll = 0;
669#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
670}
671#endif /* #ifdef CONFIG_RCU_TRACE */
672
673/*
674 * Place this after a lock-acquisition primitive to guarantee that
675 * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
676 * if the UNLOCK and LOCK are executed by the same CPU or if the
677 * UNLOCK and LOCK operate on the same lock variable.
678 */
679#ifdef CONFIG_PPC
680#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
681#else /* #ifdef CONFIG_PPC */
682#define smp_mb__after_unlock_lock()	do { } while (0)
683#endif /* #else #ifdef CONFIG_PPC */
684
685/*
686 * Wrappers for the rcu_node::lock acquire and release.
687 *
688 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 689 * different lock values; this in turn means that an UNLOCK of one level
690 * followed by a LOCK of another level does not imply a full memory barrier;
691 * and most importantly transitivity is lost.
692 *
693 * In order to restore full ordering between tree levels, augment the regular
694 * lock acquire functions with smp_mb__after_unlock_lock().
695 *
 696 * Because ->lock of struct rcu_node is a __private field, one should use
 697 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
698 */
699static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
700{
701	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
702	smp_mb__after_unlock_lock();
703}
704
705static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
706{
707	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
708}
709
710static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
711{
712	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
713	smp_mb__after_unlock_lock();
714}
715
716static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
717{
718	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
719}
720
721#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
722do {									\
723	typecheck(unsigned long, flags);				\
724	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
725	smp_mb__after_unlock_lock();					\
726} while (0)
727
728#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
729do {									\
730	typecheck(unsigned long, flags);				\
731	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
732} while (0)
733
734static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
735{
736	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
737
738	if (locked)
739		smp_mb__after_unlock_lock();
740	return locked;
741}
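/*
 * Editor's note -- an illustrative (invented) caller of the wrappers
 * above: all rcu_node ->lock traffic goes through these helpers, which
 * both respect the __private marking and supply the full-barrier
 * UNLOCK+LOCK semantics discussed earlier.
 */
static void demo_note_new_gpnum(struct rcu_node *rnp, unsigned long gpnum)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->gpnum = gpnum;	/* ->gpnum is protected by ->lock */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}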
v4.10.11
  1/*
  2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  3 * Internal non-public definitions.
  4 *
  5 * This program is free software; you can redistribute it and/or modify
  6 * it under the terms of the GNU General Public License as published by
  7 * the Free Software Foundation; either version 2 of the License, or
  8 * (at your option) any later version.
  9 *
 10 * This program is distributed in the hope that it will be useful,
 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 * GNU General Public License for more details.
 14 *
 15 * You should have received a copy of the GNU General Public License
 16 * along with this program; if not, you can access it online at
 17 * http://www.gnu.org/licenses/gpl-2.0.html.
 18 *
 19 * Copyright IBM Corporation, 2008
 20 *
 21 * Author: Ingo Molnar <mingo@elte.hu>
 22 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 23 */
 24
 25#include <linux/cache.h>
 26#include <linux/spinlock.h>
 27#include <linux/threads.h>
 28#include <linux/cpumask.h>
 29#include <linux/seqlock.h>
 30#include <linux/swait.h>
 31#include <linux/stop_machine.h>
 32
 33/*
 34 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 35 * CONFIG_RCU_FANOUT_LEAF.
 36 * In theory, it should be possible to add more levels straightforwardly.
 37 * In practice, this did work well going from three levels to four.
 38 * Of course, your mileage may vary.
 39 */
 40
 41#ifdef CONFIG_RCU_FANOUT
 42#define RCU_FANOUT CONFIG_RCU_FANOUT
 43#else /* #ifdef CONFIG_RCU_FANOUT */
 44# ifdef CONFIG_64BIT
 45# define RCU_FANOUT 64
 46# else
 47# define RCU_FANOUT 32
 48# endif
 49#endif /* #else #ifdef CONFIG_RCU_FANOUT */
 50
 51#ifdef CONFIG_RCU_FANOUT_LEAF
 52#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
 53#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
 54# ifdef CONFIG_64BIT
 55# define RCU_FANOUT_LEAF 64
 56# else
 57# define RCU_FANOUT_LEAF 32
 58# endif
 59#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
 60
 61#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
 62#define RCU_FANOUT_2	      (RCU_FANOUT_1 * RCU_FANOUT)
 63#define RCU_FANOUT_3	      (RCU_FANOUT_2 * RCU_FANOUT)
 64#define RCU_FANOUT_4	      (RCU_FANOUT_3 * RCU_FANOUT)
 65
 66#if NR_CPUS <= RCU_FANOUT_1
 67#  define RCU_NUM_LVLS	      1
 68#  define NUM_RCU_LVL_0	      1
 69#  define NUM_RCU_NODES	      NUM_RCU_LVL_0
 70#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
 71#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
 72#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
 73#elif NR_CPUS <= RCU_FANOUT_2
 74#  define RCU_NUM_LVLS	      2
 75#  define NUM_RCU_LVL_0	      1
 76#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 77#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
 78#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
 79#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
 80#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
 81#elif NR_CPUS <= RCU_FANOUT_3
 82#  define RCU_NUM_LVLS	      3
 83#  define NUM_RCU_LVL_0	      1
 84#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 85#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 86#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
 87#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
 88#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
 89#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
 90#elif NR_CPUS <= RCU_FANOUT_4
 91#  define RCU_NUM_LVLS	      4
 92#  define NUM_RCU_LVL_0	      1
 93#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
 94#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 95#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 96#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
 97#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
 98#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
 99#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
100#else
101# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
102#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
103
104extern int rcu_num_lvls;
105extern int rcu_num_nodes;
106
107/*
108 * Dynticks per-CPU state.
109 */
110struct rcu_dynticks {
111	long long dynticks_nesting; /* Track irq/process nesting level. */
112				    /* Process level is worth LLONG_MAX/2. */
113	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
114	atomic_t dynticks;	    /* Even value for idle, else odd. */
115#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
116	long long dynticks_idle_nesting;
117				    /* irq/process nesting level from idle. */
118	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
119				    /*  "Idle" excludes userspace execution. */
120	unsigned long dynticks_idle_jiffies;
121				    /* End of last non-NMI non-idle period. */
122#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
123#ifdef CONFIG_RCU_FAST_NO_HZ
 124	bool all_lazy;		    /* Are all of this CPU's CBs lazy? */
125	unsigned long nonlazy_posted;
126				    /* # times non-lazy CBs posted to CPU. */
127	unsigned long nonlazy_posted_snap;
128				    /* idle-period nonlazy_posted snapshot. */
129	unsigned long last_accelerate;
130				    /* Last jiffy CBs were accelerated. */
131	unsigned long last_advance_all;
132				    /* Last jiffy CBs were all advanced. */
133	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
134#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
135};
136
137/* RCU's kthread states for tracing. */
138#define RCU_KTHREAD_STOPPED  0
139#define RCU_KTHREAD_RUNNING  1
140#define RCU_KTHREAD_WAITING  2
141#define RCU_KTHREAD_OFFCPU   3
142#define RCU_KTHREAD_YIELDING 4
143#define RCU_KTHREAD_MAX      4
144
145/*
146 * Definition for node within the RCU grace-period-detection hierarchy.
147 */
148struct rcu_node {
149	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
150					/*  some rcu_state fields as well as */
151					/*  following. */
152	unsigned long gpnum;	/* Current grace period for this node. */
153				/*  This will either be equal to or one */
154				/*  behind the root rcu_node's gpnum. */
155	unsigned long completed; /* Last GP completed for this node. */
156				/*  This will either be equal to or one */
 157				/*  behind the root rcu_node's completed. */
158	unsigned long qsmask;	/* CPUs or groups that need to switch in */
159				/*  order for current grace period to proceed.*/
160				/*  In leaf rcu_node, each bit corresponds to */
161				/*  an rcu_data structure, otherwise, each */
162				/*  bit corresponds to a child rcu_node */
163				/*  structure. */
164	unsigned long qsmaskinit;
165				/* Per-GP initial value for qsmask. */
166				/*  Initialized from ->qsmaskinitnext at the */
167				/*  beginning of each grace period. */
168	unsigned long qsmaskinitnext;
169				/* Online CPUs for next grace period. */
170	unsigned long expmask;	/* CPUs or groups that need to check in */
171				/*  to allow the current expedited GP */
172				/*  to complete. */
173	unsigned long expmaskinit;
174				/* Per-GP initial values for expmask. */
175				/*  Initialized from ->expmaskinitnext at the */
176				/*  beginning of each expedited GP. */
177	unsigned long expmaskinitnext;
178				/* Online CPUs for next expedited GP. */
179				/*  Any CPU that has ever been online will */
180				/*  have its bit set. */
181	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
182				/*  Only one bit will be set in this mask. */
183	int	grplo;		/* lowest-numbered CPU or group here. */
184	int	grphi;		/* highest-numbered CPU or group here. */
185	u8	grpnum;		/* CPU/group number for next level up. */
186	u8	level;		/* root is at level 0. */
187	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
188				/*  exit RCU read-side critical sections */
189				/*  before propagating offline up the */
190				/*  rcu_node tree? */
191	struct rcu_node *parent;
192	struct list_head blkd_tasks;
193				/* Tasks blocked in RCU read-side critical */
194				/*  section.  Tasks are placed at the head */
195				/*  of this list and age towards the tail. */
196	struct list_head *gp_tasks;
197				/* Pointer to the first task blocking the */
198				/*  current grace period, or NULL if there */
199				/*  is no such task. */
200	struct list_head *exp_tasks;
201				/* Pointer to the first task blocking the */
202				/*  current expedited grace period, or NULL */
203				/*  if there is no such task.  If there */
204				/*  is no current expedited grace period, */
 205				/*  then there cannot be any such task. */
206	struct list_head *boost_tasks;
207				/* Pointer to first task that needs to be */
208				/*  priority boosted, or NULL if no priority */
209				/*  boosting is needed for this rcu_node */
210				/*  structure.  If there are no tasks */
211				/*  queued on this rcu_node structure that */
212				/*  are blocking the current grace period, */
213				/*  there can be no such task. */
214	struct rt_mutex boost_mtx;
215				/* Used only for the priority-boosting */
216				/*  side effect, not as a lock. */
217	unsigned long boost_time;
218				/* When to start boosting (jiffies). */
219	struct task_struct *boost_kthread_task;
220				/* kthread that takes care of priority */
221				/*  boosting for this rcu_node structure. */
222	unsigned int boost_kthread_status;
223				/* State of boost_kthread_task for tracing. */
224	unsigned long n_tasks_boosted;
225				/* Total number of tasks boosted. */
226	unsigned long n_exp_boosts;
227				/* Number of tasks boosted for expedited GP. */
228	unsigned long n_normal_boosts;
229				/* Number of tasks boosted for normal GP. */
230	unsigned long n_balk_blkd_tasks;
231				/* Refused to boost: no blocked tasks. */
232	unsigned long n_balk_exp_gp_tasks;
233				/* Refused to boost: nothing blocking GP. */
234	unsigned long n_balk_boost_tasks;
235				/* Refused to boost: already boosting. */
236	unsigned long n_balk_notblocked;
237				/* Refused to boost: RCU RS CS still running. */
238	unsigned long n_balk_notyet;
239				/* Refused to boost: not yet time. */
240	unsigned long n_balk_nos;
241				/* Refused to boost: not sure why, though. */
242				/*  This can happen due to race conditions. */
243#ifdef CONFIG_RCU_NOCB_CPU
244	struct swait_queue_head nocb_gp_wq[2];
245				/* Place for rcu_nocb_kthread() to wait GP. */
246#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
247	int need_future_gp[2];
248				/* Counts of upcoming no-CB GP requests. */
249	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
250
251	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
252	unsigned long exp_seq_rq;
253	wait_queue_head_t exp_wq[4];
254} ____cacheline_internodealigned_in_smp;
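/*
 * Editor's note -- a hedged sketch (helper name invented; the real logic
 * lives in kernel/rcu/tree_exp.h) of how the ->exp_lock, ->exp_seq_rq,
 * and ->exp_wq[] fields that replaced v4.6's exp_funnel_mutex are used:
 * a requester records the expedited sequence number it needs under
 * ->exp_lock, then sleeps on one of four wait queues selected by the
 * low-order bits of that sequence number, which spreads waiters for
 * consecutive grace periods across distinct queues.
 */
static bool demo_exp_done(unsigned long s);	/* assumed: GP s finished */

static void demo_exp_funnel_wait(struct rcu_node *rnp, unsigned long s)
{
	spin_lock(&rnp->exp_lock);
	if (rnp->exp_seq_rq < s)
		rnp->exp_seq_rq = s;		/* record our request */
	spin_unlock(&rnp->exp_lock);
	wait_event(rnp->exp_wq[(s >> 1) & 0x3], demo_exp_done(s));
}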
255
256/*
257 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
258 * are indexed relative to this interval rather than the global CPU ID space.
259 * This generates the bit for a CPU in node-local masks.
260 */
261#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
262
263/*
264 * Do a full breadth-first scan of the rcu_node structures for the
265 * specified rcu_state structure.
266 */
267#define rcu_for_each_node_breadth_first(rsp, rnp) \
268	for ((rnp) = &(rsp)->node[0]; \
269	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
270
271/*
272 * Do a breadth-first scan of the non-leaf rcu_node structures for the
273 * specified rcu_state structure.  Note that if there is a singleton
274 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
275 */
276#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
277	for ((rnp) = &(rsp)->node[0]; \
278	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
279
280/*
281 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
282 * structure.  Note that if there is a singleton rcu_node tree with but
283 * one rcu_node structure, this loop -will- visit the rcu_node structure.
284 * It is still a leaf node, even if it is also the root node.
285 */
286#define rcu_for_each_leaf_node(rsp, rnp) \
287	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
288	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
289
290/*
291 * Iterate over all possible CPUs in a leaf RCU node.
292 */
293#define for_each_leaf_node_possible_cpu(rnp, cpu) \
294	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
295	     cpu <= rnp->grphi; \
296	     cpu = cpumask_next((cpu), cpu_possible_mask))
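/*
 * Editor's note -- an illustrative (invented) caller combining the two
 * new v4.10 helpers above: walk every possible CPU of a leaf and test
 * that CPU's node-local mask bit.
 */
static int demo_count_cpus_owing_qs(struct rcu_node *rnp)
{
	int cpu, n = 0;

	for_each_leaf_node_possible_cpu(rnp, cpu)
		if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
			n++;	/* still blocks the current grace period */
	return n;
}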
297
298/*
299 * Union to allow "aggregate OR" operation on the need for a quiescent
300 * state by the normal and expedited grace periods.
301 */
302union rcu_noqs {
303	struct {
304		u8 norm;
305		u8 exp;
306	} b; /* Bits. */
307	u16 s; /* Set of bits, aggregate OR here. */
308};
309
310/* Index values for nxttail array in struct rcu_data. */
311#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
312#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
313#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
314#define RCU_NEXT_TAIL		3
315#define RCU_NEXT_SIZE		4
316
317/* Per-CPU data for read-copy update. */
318struct rcu_data {
319	/* 1) quiescent-state and grace-period handling : */
320	unsigned long	completed;	/* Track rsp->completed gp number */
321					/*  in order to detect GP end. */
322	unsigned long	gpnum;		/* Highest gp number that this CPU */
323					/*  is aware of having started. */
324	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
325					/*  for rcu_all_qs() invocations. */
326	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
 327	bool		core_needs_qs;	/* Core waits for quiescent state. */
328	bool		beenonline;	/* CPU online at least once. */
329	bool		gpwrap;		/* Possible gpnum/completed wrap. */
330	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
331	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
332	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
333					/*  ticks this CPU has handled */
334					/*  during and after the last grace */
335					/* period it is aware of. */
336
337	/* 2) batch handling */
338	/*
339	 * If nxtlist is not NULL, it is partitioned as follows.
340	 * Any of the partitions might be empty, in which case the
341	 * pointer to that partition will be equal to the pointer for
342	 * the following partition.  When the list is empty, all of
343	 * the nxttail elements point to the ->nxtlist pointer itself,
344	 * which in that case is NULL.
345	 *
346	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
347	 *	Entries that batch # <= ->completed
348	 *	The grace period for these entries has completed, and
349	 *	the other grace-period-completed entries may be moved
350	 *	here temporarily in rcu_process_callbacks().
351	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
352	 *	Entries that batch # <= ->completed - 1: waiting for current GP
353	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
354	 *	Entries known to have arrived before current GP ended
355	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
356	 *	Entries that might have arrived after current GP ended
357	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
358	 *	always be NULL, as this is the end of the list.
359	 */
360	struct rcu_head *nxtlist;
361	struct rcu_head **nxttail[RCU_NEXT_SIZE];
362	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
363					/* grace periods for sublists. */
364	long		qlen_lazy;	/* # of lazy queued callbacks */
365	long		qlen;		/* # of queued callbacks, incl lazy */
366	long		qlen_last_fqs_check;
367					/* qlen at last check for QS forcing */
368	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
369	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
370	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
371	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
372	unsigned long	n_force_qs_snap;
 373					/* Did another CPU force QS recently? */
374	long		blimit;		/* Upper limit on a processed batch */
375
376	/* 3) dynticks interface. */
377	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
378	int dynticks_snap;		/* Per-GP tracking for dynticks. */
379
380	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
381	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
382	unsigned long offline_fqs;	/* Kicked due to being offline. */
383	unsigned long cond_resched_completed;
384					/* Grace period that needs help */
385					/*  from cond_resched(). */
386
387	/* 5) __rcu_pending() statistics. */
388	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
389	unsigned long n_rp_core_needs_qs;
390	unsigned long n_rp_report_qs;
391	unsigned long n_rp_cb_ready;
392	unsigned long n_rp_cpu_needs_gp;
393	unsigned long n_rp_gp_completed;
394	unsigned long n_rp_gp_started;
395	unsigned long n_rp_nocb_defer_wakeup;
396	unsigned long n_rp_need_nothing;
397
398	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
399	struct rcu_head barrier_head;
400#ifdef CONFIG_RCU_FAST_NO_HZ
401	struct rcu_head oom_head;
402#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
403	atomic_long_t exp_workdone0;	/* # done by workqueue. */
404	atomic_long_t exp_workdone1;	/* # done by others #1. */
405	atomic_long_t exp_workdone2;	/* # done by others #2. */
406	atomic_long_t exp_workdone3;	/* # done by others #3. */
407	int exp_dynticks_snap;		/* Double-check need for IPI. */
408
409	/* 7) Callback offloading. */
410#ifdef CONFIG_RCU_NOCB_CPU
411	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
412	struct rcu_head **nocb_tail;
413	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
414	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
415	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
416	struct rcu_head **nocb_follower_tail;
417	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
418	struct task_struct *nocb_kthread;
419	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
420
421	/* The following fields are used by the leader, hence own cacheline. */
422	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
423					/* CBs waiting for GP. */
424	struct rcu_head **nocb_gp_tail;
425	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
426	struct rcu_data *nocb_next_follower;
427					/* Next follower in wakeup chain. */
428
 429	/* The following fields are used by the follower, hence new cacheline. */
430	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
431					/* Leader CPU takes GP-end wakeups. */
432#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
433
434	/* 8) RCU CPU stall data. */
435	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
436
437	int cpu;
438	struct rcu_state *rsp;
439};
440
441/* Values for nocb_defer_wakeup field in struct rcu_data. */
442#define RCU_NOGP_WAKE_NOT	0
443#define RCU_NOGP_WAKE		1
444#define RCU_NOGP_WAKE_FORCE	2
445
446#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
447					/* For jiffies_till_first_fqs and */
 448					/*  jiffies_till_next_fqs. */
449
450#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
451					/*  delay between bouts of */
452					/*  quiescent-state forcing. */
453
454#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
455					/*  at least one scheduling clock */
456					/*  irq before ratting on them. */
457
458#define rcu_wait(cond)							\
459do {									\
460	for (;;) {							\
461		set_current_state(TASK_INTERRUPTIBLE);			\
462		if (cond)						\
463			break;						\
464		schedule();						\
465	}								\
466	__set_current_state(TASK_RUNNING);				\
467} while (0)
468
469/*
470 * RCU global state, including node hierarchy.  This hierarchy is
471 * represented in "heap" form in a dense array.  The root (first level)
472 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
473 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
474 * and the third level in ->node[m+1] and following (->node[m+1] referenced
475 * by ->level[2]).  The number of levels is determined by the number of
476 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
477 * consisting of a single rcu_node.
478 */
479struct rcu_state {
480	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
481	struct rcu_node *level[RCU_NUM_LVLS + 1];
482						/* Hierarchy levels (+1 to */
483						/*  shut bogus gcc warning) */
484	u8 flavor_mask;				/* bit in flavor mask. */
 485	struct rcu_data __percpu *rda;		/* Pointer to per-CPU rcu_data. */
486	call_rcu_func_t call;			/* call_rcu() flavor. */
487	int ncpus;				/* # CPUs seen so far. */
488
489	/* The following fields are guarded by the root rcu_node's lock. */
490
491	u8	boost ____cacheline_internodealigned_in_smp;
492						/* Subject to priority boost. */
493	unsigned long gpnum;			/* Current gp number. */
494	unsigned long completed;		/* # of last completed gp. */
495	struct task_struct *gp_kthread;		/* Task for grace periods. */
496	struct swait_queue_head gp_wq;		/* Where GP task waits. */
497	short gp_flags;				/* Commands for GP task. */
498	short gp_state;				/* GP kthread sleep state. */
499
500	/* End of fields guarded by root rcu_node's lock. */
501
502	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
503						/* Protect following fields. */
504	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
505						/*  need a grace period. */
506	struct rcu_head **orphan_nxttail;	/* Tail of above. */
507	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
508						/*  are ready to invoke. */
509	struct rcu_head **orphan_donetail;	/* Tail of above. */
510	long qlen_lazy;				/* Number of lazy callbacks. */
511	long qlen;				/* Total number of callbacks. */
512	/* End of fields guarded by orphan_lock. */
513
514	struct mutex barrier_mutex;		/* Guards barrier fields. */
515	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
516	struct completion barrier_completion;	/* Wake at barrier end. */
517	unsigned long barrier_sequence;		/* ++ at start and end of */
518						/*  _rcu_barrier(). */
519	/* End of fields guarded by barrier_mutex. */
520
521	struct mutex exp_mutex;			/* Serialize expedited GP. */
522	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
523	unsigned long expedited_sequence;	/* Take a ticket. */
524	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
525	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
526	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
527	int ncpus_snap;				/* # CPUs seen last time. */
528
529	unsigned long jiffies_force_qs;		/* Time at which to invoke */
530						/*  force_quiescent_state(). */
531	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
532						/*  kthreads, if configured. */
533	unsigned long n_force_qs;		/* Number of calls to */
534						/*  force_quiescent_state(). */
535	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
536						/*  due to lock unavailable. */
537	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
538						/*  due to no GP active. */
539	unsigned long gp_start;			/* Time at which GP started, */
540						/*  but in jiffies. */
541	unsigned long gp_activity;		/* Time of last GP kthread */
542						/*  activity in jiffies. */
543	unsigned long jiffies_stall;		/* Time at which to check */
544						/*  for CPU stalls. */
545	unsigned long jiffies_resched;		/* Time at which to resched */
546						/*  a reluctant CPU. */
547	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
548						/*  GP start. */
549	unsigned long gp_max;			/* Maximum GP duration in */
550						/*  jiffies. */
551	const char *name;			/* Name of structure. */
552	char abbr;				/* Abbreviated name. */
553	struct list_head flavors;		/* List of RCU flavors. */
554};
555
556/* Values for rcu_state structure's gp_flags field. */
557#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
558#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
559
560/* Values for rcu_state structure's gp_state field. */
561#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
562#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
563#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
564#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
565#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
566#define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
567#define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */
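
/*
 * Editorial sketch, not kernel code: how the GP kthread records its
 * sleep state with the values above while waiting for a new grace
 * period to be requested (cf. rcu_gp_kthread()).  example_gp_wait() is
 * an illustrative name.
 */
static void example_gp_wait(struct rcu_state *rsp)
{
	rsp->gp_state = RCU_GP_WAIT_GPS;	/* About to sleep. */
	swait_event_interruptible(rsp->gp_wq,
				  READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT);
	rsp->gp_state = RCU_GP_DONE_GPS;	/* Awake: GP start requested. */
}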
568
569#ifndef RCU_TREE_NONCORE
570static const char * const gp_state_names[] = {
571	"RCU_GP_IDLE",
572	"RCU_GP_WAIT_GPS",
573	"RCU_GP_DONE_GPS",
574	"RCU_GP_WAIT_FQS",
575	"RCU_GP_DOING_FQS",
576	"RCU_GP_CLEANUP",
577	"RCU_GP_CLEANED",
578};
579#endif /* #ifndef RCU_TREE_NONCORE */
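
/*
 * Editorial sketch, not kernel code: a bounds-checked mapping from a
 * ->gp_state value to its name for debug output; mainline's tree.c
 * carries a helper along these lines.  example_gp_state_name() is an
 * illustrative name and assumes gp_state_names[] is visible (that is,
 * RCU_TREE_NONCORE is not defined).
 */
static inline const char *example_gp_state_name(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";			/* Out-of-range state value. */
	return gp_state_names[gs];
}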
580
581extern struct list_head rcu_struct_flavors;
582
583/* Sequence through rcu_state structures for each RCU flavor. */
584#define for_each_rcu_flavor(rsp) \
585	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
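
/*
 * Editorial sketch, not kernel code: typical use of for_each_rcu_flavor()
 * to walk every registered rcu_state.  example_show_flavors() is an
 * illustrative name.
 */
static void example_show_flavors(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		pr_info("rcu: flavor %s (%c)\n", rsp->name, rsp->abbr);
}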
586
587/*
588 * RCU implementation internal declarations:
589 */
590extern struct rcu_state rcu_sched_state;
591
592extern struct rcu_state rcu_bh_state;
593
594#ifdef CONFIG_PREEMPT_RCU
595extern struct rcu_state rcu_preempt_state;
596#endif /* #ifdef CONFIG_PREEMPT_RCU */
597
598#ifdef CONFIG_RCU_BOOST
599DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
600DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
601DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
602DECLARE_PER_CPU(char, rcu_cpu_has_work);
603#endif /* #ifdef CONFIG_RCU_BOOST */
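
/*
 * Editorial sketch, not kernel code: consulting the per-CPU boost
 * bookkeeping declared above for the current CPU (assumes
 * CONFIG_RCU_BOOST).  example_cpu_has_rcu_work() is an illustrative name.
 */
static inline bool example_cpu_has_rcu_work(void)
{
	return __this_cpu_read(rcu_cpu_has_work) != 0;
}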
604
605#ifndef RCU_TREE_NONCORE
606
607/* Forward declarations for rcutree_plugin.h */
608static void rcu_bootup_announce(void);
609static void rcu_preempt_note_context_switch(void);
610static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
614static void rcu_print_detail_task_stall(struct rcu_state *rsp);
615static int rcu_print_task_stall(struct rcu_node *rnp);
616static int rcu_print_task_exp_stall(struct rcu_node *rnp);
617static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
618static void rcu_preempt_check_callbacks(void);
619void call_rcu(struct rcu_head *head, rcu_callback_t func);
620static void __init __rcu_init_preempt(void);
621static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
622static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
623static void invoke_rcu_callbacks_kthread(void);
624static bool rcu_is_callbacks_kthread(void);
625#ifdef CONFIG_RCU_BOOST
626static void rcu_preempt_do_callbacks(void);
627static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
628						 struct rcu_node *rnp);
629#endif /* #ifdef CONFIG_RCU_BOOST */
630static void __init rcu_spawn_boost_kthreads(void);
631static void rcu_prepare_kthreads(int cpu);
632static void rcu_cleanup_after_idle(void);
633static void rcu_prepare_for_idle(void);
634static void rcu_idle_count_callbacks_posted(void);
635static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
636static void print_cpu_stall_info_begin(void);
637static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
638static void print_cpu_stall_info_end(void);
639static void zero_cpu_stall_ticks(struct rcu_data *rdp);
640static void increment_cpu_stall_ticks(void);
641static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
642static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
643static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
644static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
645static void rcu_init_one_nocb(struct rcu_node *rnp);
646static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
647			    bool lazy, unsigned long flags);
648static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
649				      struct rcu_data *rdp,
650				      unsigned long flags);
651static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
652static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
653static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
654static void rcu_spawn_all_nocb_kthreads(int cpu);
655static void __init rcu_spawn_nocb_kthreads(void);
656#ifdef CONFIG_RCU_NOCB_CPU
657static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
658#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
659static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
660static bool init_nocb_callback_list(struct rcu_data *rdp);
661static void rcu_sysidle_enter(int irq);
662static void rcu_sysidle_exit(int irq);
663static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
664				  unsigned long *maxj);
665static bool is_sysidle_rcu_state(struct rcu_state *rsp);
666static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
667				  unsigned long maxj);
668static void rcu_bind_gp_kthread(void);
669static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
670static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
671static void rcu_dynticks_task_enter(void);
672static void rcu_dynticks_task_exit(void);
673
674#endif /* #ifndef RCU_TREE_NONCORE */
675
676#ifdef CONFIG_RCU_TRACE
677/* Read out queue lengths for tracing. */
678static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
679{
680#ifdef CONFIG_RCU_NOCB_CPU
681	*ql = atomic_long_read(&rdp->nocb_q_count);
682	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
683#else /* #ifdef CONFIG_RCU_NOCB_CPU */
684	*ql = 0;
685	*qll = 0;
686#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
687}
688#endif /* #ifdef CONFIG_RCU_TRACE */
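
/*
 * Editorial sketch, not kernel code: typical use of rcu_nocb_q_lengths()
 * when emitting a trace report (assumes CONFIG_RCU_TRACE).
 * example_trace_q_lengths() is an illustrative name.
 */
static inline void example_trace_q_lengths(struct rcu_data *rdp)
{
	long ql, qll;

	rcu_nocb_q_lengths(rdp, &ql, &qll);
	pr_info("rcu: %ld queued callbacks (%ld lazy)\n", ql, qll);
}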
689
690/*
691 * Place this after a lock-acquisition primitive to guarantee that
692 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
693 * if the UNLOCK and LOCK are executed by the same CPU or if the
694 * UNLOCK and LOCK operate on the same lock variable.
695 */
696#ifdef CONFIG_PPC
697#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
698#else /* #ifdef CONFIG_PPC */
699#define smp_mb__after_unlock_lock()	do { } while (0)
700#endif /* #else #ifdef CONFIG_PPC */
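
/*
 * Editorial sketch, not kernel code: the pattern the macro above
 * supports.  Releasing one lock and acquiring another on the same CPU,
 * then invoking smp_mb__after_unlock_lock(), upgrades the UNLOCK+LOCK
 * pair to a full memory barrier.  The two lock parameters are
 * illustrative, and the caller is assumed to hold "a" on entry.
 */
static inline void example_unlock_lock_pattern(raw_spinlock_t *a,
					       raw_spinlock_t *b)
{
	raw_spin_unlock(a);		/* End first critical section. */
	raw_spin_lock(b);		/* Start second critical section. */
	smp_mb__after_unlock_lock();	/* Upgrade the pair to a full barrier. */
}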
701
702/*
703 * Wrappers for the rcu_node::lock acquire and release.
704 *
705 * Because the rcu_nodes form a tree, traversing the tree takes locks on
706 * different lock variables, so an UNLOCK of one level followed by a
707 * LOCK of another level does not imply a full memory barrier; most
708 * importantly, transitivity is lost.
709 *
710 * To restore full ordering between tree levels, augment the regular
711 * lock-acquisition functions with smp_mb__after_unlock_lock().
712 *
713 * Because ->lock of struct rcu_node is a __private field, use these
714 * wrappers rather than calling raw_spin_{lock,unlock}* directly on ->lock.
715 */
716static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
717{
718	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
719	smp_mb__after_unlock_lock();
720}
721
722static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
723{
724	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
725}
726
727static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
728{
729	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
730	smp_mb__after_unlock_lock();
731}
732
733static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
734{
735	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
736}
737
738#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
739do {									\
740	typecheck(unsigned long, flags);				\
741	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
742	smp_mb__after_unlock_lock();					\
743} while (0)
744
745#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
746do {									\
747	typecheck(unsigned long, flags);				\
748	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
749} while (0)
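
/*
 * Editorial sketch, not kernel code: a typical critical section using
 * the irqsave/irqrestore wrappers above.  The update itself is elided;
 * example_rnp_critical_section() is an illustrative name.
 */
static inline void example_rnp_critical_section(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* ...update state guarded by rnp->lock... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}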
750
751static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
752{
753	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
754
755	if (locked)
756		smp_mb__after_unlock_lock();
757	return locked;
758}
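
/*
 * Editorial sketch, not kernel code: opportunistic use of
 * raw_spin_trylock_rcu_node(), backing off if the node is contended.
 * example_opportunistic_update() is an illustrative name.
 */
static inline bool example_opportunistic_update(struct rcu_node *rnp)
{
	if (!raw_spin_trylock_rcu_node(rnp))
		return false;		/* Contended: let the holder proceed. */
	/* ...brief update guarded by rnp->lock... */
	raw_spin_unlock_rcu_node(rnp);
	return true;
}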